code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write the updated schema through __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
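# Usage sketch (illustrative; the column names "file" and "sentence" are hypothetical):
#
#     task = AutomaticSpeechRecognition(audio_column="file", transcription_column="sentence")
#     features = Features({"file": Audio(sampling_rate=16_000), "sentence": Value("string")})
#     aligned = task.align_with_features(features)
#     aligned.column_mapping  # {"file": "audio", "sentence": "transcription"}
#
# align_with_features copies the dataset's actual Audio feature (e.g. its sampling rate)
# into the template's input schema, so downstream code sees the right decoding parameters.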
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
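# Usage sketch (illustrative): the retrieval-specific sizes ride along with the usual
# BERT-style hyperparameters.
#
#     config = RealmConfig()        # BERT-sized encoder defaults
#     config.searcher_beam_size     # 5000
#     config.reader_beam_size       # 5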
| 675 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss."
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    "Produce the HTML for a progress bar, with `prefix` before it and `label` after it."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table; the first row is treated as a header."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """A progress bar for display in a notebook."""

    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Close this progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A training tracker: a progress bar plus a metrics table and an optional child bar."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """A `TrainerCallback` that displays training or evaluation progress in a notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping entries so only actual metrics end up in the table.
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
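# Usage sketch (illustrative): `Trainer` installs this callback automatically when it
# detects a notebook environment, but it can also be passed explicitly; `model`, `args`,
# and `train_dataset` are assumed to be defined elsewhere.
#
#     trainer = Trainer(model=model, args=args, train_dataset=train_dataset,
#                       callbacks=[NotebookProgressCallback()])
#     trainer.train()  # renders a live progress bar plus a loss table per log/eval step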
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting downloads.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
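# Usage sketch (illustrative): tests opt into the marker registered above and can then
# be selected with `pytest -m torchaudio_latest`.
#
#     @pytest.mark.torchaudio_latest
#     def test_something_with_recent_torchaudio():
#         ...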
| 675 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing/accent options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a standard one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
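# Usage sketch (illustrative): the custom Jieba pre-tokenizer segments Chinese text into
# words before the WordPiece model runs.
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     ids = tokenizer("今天天气非常好。")["input_ids"]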
| 719 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
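# For example (illustrative): find_negative_index([4, 3, 2, -1]) == 3 and
# find_negative_index([-1, -2]) == 0, since rows are assumed to be sorted in
# decreasing order.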
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """An O(m log n) solution that uses binary search to find the boundary between
    positive and negative numbers in each row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """An O(mn) solution that iterates through every element of the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Similar to the brute force solution above, but uses break to reduce the number
    of iterations once the first negative number in a row is found."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three solutions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Download a test image to verify the converted model's outputs on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TensorFlow checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
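# Example invocation (illustrative; the script filename and the paths are placeholders):
#
#     python convert_original_tf_checkpoint_to_pytorch.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt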
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
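# With this pattern, importing the package is cheap: `TapexTokenizer` (and the
# `tokenization_tapex` submodule behind it) is only loaded on first attribute access.
# Sketch (illustrative; the exact module path depends on the transformers version):
#
#     from transformers.models.tapex import TapexTokenizer  # triggers the lazy load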
| 675 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=0.9,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # Scale the target size up by 1/crop_pct so the later center crop lands on `size`.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized so the check only fires when resizing is actually requested.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
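# Usage sketch (illustrative; `image` is assumed to be a PIL.Image or numpy array):
#
#     processor = PoolFormerImageProcessor()
#     batch = processor(images=image, return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) with the default size/crop settings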
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True,
        use_input_mask=True, use_token_type_ids=False, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 675 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope='''session''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = f"repo_txt_data-{int(time.time() * 10e3 )}"
__UpperCAmelCase : Dict = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo='''data/text_data.txt''' , repo_id=lowercase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Any = f"repo_zipped_txt_data-{int(time.time() * 10e3 )}"
__UpperCAmelCase : Tuple = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo='''data.zip''' , repo_id=lowercase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : int = f"repo_zipped_img_data-{int(time.time() * 10e3 )}"
__UpperCAmelCase : Dict = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo='''data.zip''' , repo_id=lowercase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
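

# Example consumer (added for illustration): the session-scoped repo fixtures
# above yield the repo_id of a freshly created private dataset repo on the CI
# hub, so a minimal smoke test can simply check its owner prefix.
def test_private_txt_repo_id_example(hf_private_dataset_repo_txt_data):
    assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER + "/")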
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
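

# Quick usage sketch (added for illustration): build a small config and
# inspect the ONNX input axes. The values below are arbitrary.
if __name__ == "__main__":
    config = BertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = BertOnnxConfig(config)
    print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'token_type_ids']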
| 675 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    'vectors' should be a n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    'noofclusters' should be an integer.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
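

# Toy usage (added). Note this module targets the legacy pre-1.0 TensorFlow
# API (tf.Session, tf.sub, tf.initialize_all_variables), so it only runs on
# very old TensorFlow releases; the data below is illustrative.
if __name__ == "__main__":
    import numpy as np

    points = np.array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]], dtype=np.float64)
    centroids, assignments = TFKMeansCluster(points, 2)
    print("centroids:", centroids)
    print("assignments:", assignments)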
| 675 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
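

# Shape-level sanity check (added): drive the scheduler with a dummy model
# that predicts zeros. A real sampler would replace `model_output` with the
# prediction of a trained network; everything below is illustrative.
if __name__ == "__main__":
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else torch.tensor(0.0)
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = torch.zeros_like(sample_hat)  # stand-in for a trained model
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    assert sample.shape == (1, 3, 8, 8)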
| 702 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    >>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
    500
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
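    # Illustrative trace (added): for [2, 7, 9, 3, 1] the (include, exclude)
    # pairs evolve (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11),
    # so the maximum non-adjacent sum is max(12, 11) = 12 (i.e. 2 + 9 + 1).
    assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12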
| 675 | 0 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape Amazon India search results for ``product`` and collect the title,
    link, price, rating, MRP and discount of each result in a pandas DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # Append the scraped fields as a new row. (Two post-processing
        # statements from the original source were unrecoverable here.)
        data_frame.loc[len(data_frame)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = """headphones"""
    get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 675 | 0 |
import argparse
import os
import re
lowerCAmelCase = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCAmelCase = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = False ) -> Optional[Any]:
'''simple docstring'''
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Optional[int] = f.read()
__UpperCAmelCase : int = content.split('''\n''' )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Union[str, Any] = 0
while line_idx < len(lowercase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__UpperCAmelCase : Optional[int] = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__UpperCAmelCase : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__UpperCAmelCase : Optional[Any] = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__UpperCAmelCase : Optional[Any] = sorted(lowercase_ , key=lambda lowercase_ : _re_identifier.search(lowercase_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowercase_ ) )
elif "\n".join(lowercase_ ) != content:
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ = False ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = [os.path.join(lowercase_ , lowercase_ ) for f in os.listdir(lowercase_ ) if f.endswith('''.py''' )]
__UpperCAmelCase : int = [sort_auto_mapping(lowercase_ , overwrite=lowercase_ ) for fname in fnames]
if not overwrite and any(lowercase_ ):
__UpperCAmelCase : Any = [f for f, d in zip(lowercase_ , lowercase_ ) if d]
raise ValueError(
f"The following files have auto mappings that need sorting: {', '.join(lowercase_ )}. Run `make style` to fix"
''' this.''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
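

# Tiny illustration (added) of what the two helper regexes match; the sample
# strings are hypothetical mapping lines in the style of the auto modules.
def _demo_regexes() -> None:
    assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
    assert _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0] == "albert"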
| 704 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
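

# Usage sketch (added), following the documented DiT example; the checkpoint
# id "facebook/DiT-XL-2-256" and the label names refer to publicly available
# weights and are assumptions, not part of this module.
if __name__ == "__main__":
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    images = pipe(class_labels=class_ids, num_inference_steps=25).images
    images[0].save("dit_sample.png")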
| 675 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            # The boolean flags below are inferred from the expected output
            # (unlowered "iPhone", unnormalized "\u3000").
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 675 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
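

# Example invocation (added; the script filename and paths are illustrative
# placeholders, not part of the original source):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa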
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase = """docs/source/en/_toctree.yml"""
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = defaultdict(lowercase_ )
__UpperCAmelCase : Any = []
__UpperCAmelCase : Tuple = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(lowercase_ )
__UpperCAmelCase : List[Any] = new_doc_list
__UpperCAmelCase : Dict = [key for key, value in counts.items() if value > 1]
__UpperCAmelCase : Union[str, Any] = []
for duplicate_key in duplicates:
__UpperCAmelCase : List[str] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(lowercase_ ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__UpperCAmelCase : int = sorted(lowercase_ , key=lambda lowercase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase_ ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(lowercase_ )
# Sort
return overview_doc
def __SCREAMING_SNAKE_CASE ( lowercase_=False ) -> List[Any]:
'''simple docstring'''
with open(lowercase_ , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Optional[int] = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase : List[Any] = content[api_idx]['''sections''']
# Then to the model doc
__UpperCAmelCase : Any = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__UpperCAmelCase : Union[str, Any] = api_doc[scheduler_idx]['''sections''']
__UpperCAmelCase : Optional[int] = clean_doc_toc(lowercase_ )
__UpperCAmelCase : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
__UpperCAmelCase : Tuple = True
if overwrite:
__UpperCAmelCase : Union[str, Any] = new_scheduler_doc
if diff:
if overwrite:
__UpperCAmelCase : Dict = api_doc
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def __SCREAMING_SNAKE_CASE ( lowercase_=False ) -> List[Any]:
'''simple docstring'''
with open(lowercase_ , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase : Optional[int] = content[api_idx]['''sections''']
# Then to the model doc
__UpperCAmelCase : Any = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__UpperCAmelCase : Any = False
__UpperCAmelCase : Union[str, Any] = api_doc[pipeline_idx]['''sections''']
__UpperCAmelCase : List[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__UpperCAmelCase : Dict = pipeline_doc['''section''']
__UpperCAmelCase : Optional[Any] = clean_doc_toc(lowercase_ )
if overwrite:
__UpperCAmelCase : int = new_sub_pipeline_doc
new_pipeline_docs.append(lowercase_ )
# sort overall pipeline doc
__UpperCAmelCase : List[str] = clean_doc_toc(lowercase_ )
if new_pipeline_docs != pipeline_docs:
__UpperCAmelCase : Tuple = True
if overwrite:
__UpperCAmelCase : List[Any] = new_pipeline_docs
if diff:
if overwrite:
__UpperCAmelCase : Union[str, Any] = api_doc
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
            '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 707 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
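# For example, with the default scale_factor=8: downscale_height_and_width(768, 768) == (96, 96)
# (768 is divisible by 8**2), while downscale_height_and_width(700, 700) == (88, 88), since
# 700 // 64 == 10 is rounded up to 11 and multiplied back by 8.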
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
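        # scale by the scheduler's expected standard deviation of the initial noise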
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
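                # classifier-free guidance: move from the unconditional prediction toward the
                # conditioned one, scaled by `guidance_scale`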
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCAmelCase : Optional[Any] = model_type_to_module_name(lowercase_ )
__UpperCAmelCase : Tuple = importlib.import_module(f".{module_name}" , '''transformers.models''' )
try:
return getattr(lowercase_ , lowercase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase_ , '''__name__''' , lowercase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCAmelCase : Tuple = importlib.import_module('''transformers''' )
if hasattr(lowercase_ , lowercase_ ):
return getattr(lowercase_ , lowercase_ )
return None
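# For example, image_processor_class_from_name("CLIPImageProcessor") resolves the class through the
# mapping above (the clip module), while an unrecognized name falls back to the top-level
# `transformers` namespace and, failing that, returns None.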
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , **lowercase_ , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = get_file_from_repo(
lowercase_ , lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , resume_download=lowercase_ , proxies=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , local_files_only=lowercase_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(lowercase_ , encoding='''utf-8''' ) as reader:
return json.load(lowercase_ )
class lowerCamelCase :
def __init__( self):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(lowercase__)
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = kwargs.pop('''config''' , lowercase__)
__UpperCAmelCase : List[str] = kwargs.pop('''trust_remote_code''' , lowercase__)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = ImageProcessingMixin.get_image_processor_dict(lowercase__ , **lowercase__)
__UpperCAmelCase : Optional[Any] = config_dict.get('''image_processor_type''' , lowercase__)
__UpperCAmelCase : List[str] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {}):
__UpperCAmelCase : str = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__UpperCAmelCase : Any = config_dict.pop('''feature_extractor_type''' , lowercase__)
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''')
__UpperCAmelCase : List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''')
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
__UpperCAmelCase : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
__UpperCAmelCase : Optional[int] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''')
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''')
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ , **lowercase__)
# It could be in `config.image_processor_type``
__UpperCAmelCase : str = getattr(lowercase__ , '''image_processor_type''' , lowercase__)
if hasattr(lowercase__ , '''auto_map''') and "AutoImageProcessor" in config.auto_map:
__UpperCAmelCase : int = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
__UpperCAmelCase : str = image_processor_class_from_name(lowercase__)
__UpperCAmelCase : Union[str, Any] = image_processor_auto_map is not None
__UpperCAmelCase : List[str] = image_processor_class is not None or type(lowercase__) in IMAGE_PROCESSOR_MAPPING
__UpperCAmelCase : Optional[int] = resolve_trust_remote_code(
lowercase__ , lowercase__ , lowercase__ , lowercase__)
if has_remote_code and trust_remote_code:
__UpperCAmelCase : List[str] = get_class_from_dynamic_module(
lowercase__ , lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = kwargs.pop('''code_revision''' , lowercase__)
if os.path.isdir(lowercase__):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowercase__ , **lowercase__)
elif image_processor_class is not None:
return image_processor_class.from_dict(lowercase__ , **lowercase__)
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowercase__) in IMAGE_PROCESSOR_MAPPING:
__UpperCAmelCase : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(lowercase__)]
return image_processor_class.from_dict(lowercase__ , **lowercase__)
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
@staticmethod
def A( lowercase__ , lowercase__):
IMAGE_PROCESSOR_MAPPING.register(lowercase__ , lowercase__)
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 709 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
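        # with the default `conv_stride` above this evaluates to 5 * 2**6 == 320, i.e. the feature
        # encoder emits one output frame per 320 input samples (20 ms of 16 kHz audio)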
return functools.reduce(operator.mul , self.conv_stride , 1)
| 675 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger("""transformers.models.encodec""")
lowerCAmelCase = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
lowerCAmelCase = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
lowerCAmelCase = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
lowerCAmelCase = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
lowerCAmelCase = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCAmelCase = []
lowerCAmelCase = []
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCAmelCase : Dict = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
__UpperCAmelCase : int = getattr(lowercase_ , lowercase_ ).shape
else:
__UpperCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[str] = value
elif weight_type == "weight_v":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias":
__UpperCAmelCase : Dict = value
elif weight_type == "running_mean":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "running_var":
__UpperCAmelCase : Tuple = value
elif weight_type == "num_batches_tracked":
__UpperCAmelCase : Any = value
elif weight_type == "weight_ih_l0":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_hh_l0":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
__UpperCAmelCase : List[Any] = value
elif weight_type == "bias_hh_l0":
__UpperCAmelCase : str = value
elif weight_type == "weight_ih_l1":
__UpperCAmelCase : Dict = value
elif weight_type == "weight_hh_l1":
__UpperCAmelCase : str = value
elif weight_type == "bias_ih_l1":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias_hh_l1":
__UpperCAmelCase : Optional[int] = value
else:
__UpperCAmelCase : Union[str, Any] = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__UpperCAmelCase : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
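# For example, with hypothetical patterns: should_ignore("decoder.layers.0.conv", ["decoder.*"])
# is True (prefix match), should_ignore("encoder.layers.13.lstm", ["encoder.*.lstm"]) is True
# (prefix/suffix match), and a name matching no pattern returns False.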
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : str = []
if model_name == "encodec_24khz" or "encodec_32khz":
__UpperCAmelCase : Union[str, Any] = MAPPING_24K
elif model_name == "encodec_48khz":
__UpperCAmelCase : Dict = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(lowercase_ , lowercase_ ):
logger.info(f"{name} was ignored" )
continue
__UpperCAmelCase : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__UpperCAmelCase : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
__UpperCAmelCase : List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
__UpperCAmelCase : Tuple = True
if "*" in mapped_key:
__UpperCAmelCase : Dict = name.split(lowercase_ )[0].split('''.''' )[-2]
__UpperCAmelCase : Tuple = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
__UpperCAmelCase : Optional[int] = '''weight_g'''
elif "weight_v" in name:
__UpperCAmelCase : List[Any] = '''weight_v'''
elif "weight_ih_l0" in name:
__UpperCAmelCase : Dict = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
__UpperCAmelCase : List[Any] = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
__UpperCAmelCase : str = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
__UpperCAmelCase : Any = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
__UpperCAmelCase : Optional[Any] = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
__UpperCAmelCase : Optional[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
__UpperCAmelCase : str = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
__UpperCAmelCase : Optional[Any] = '''bias_hh_l1'''
elif "bias" in name:
__UpperCAmelCase : Tuple = '''bias'''
elif "weight" in name:
__UpperCAmelCase : str = '''weight'''
elif "running_mean" in name:
__UpperCAmelCase : Any = '''running_mean'''
elif "running_var" in name:
__UpperCAmelCase : int = '''running_var'''
elif "num_batches_tracked" in name:
__UpperCAmelCase : Dict = '''num_batches_tracked'''
else:
__UpperCAmelCase : Optional[Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> Any:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : int = EncodecConfig.from_pretrained(lowercase_ )
else:
__UpperCAmelCase : Dict = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__UpperCAmelCase : Optional[int] = [8, 5, 4, 4]
__UpperCAmelCase : str = [2.2]
__UpperCAmelCase : Optional[int] = 64
__UpperCAmelCase : str = 32000
__UpperCAmelCase : Optional[Any] = 2048
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Tuple = False
elif model_name == "encodec_48khz":
__UpperCAmelCase : Tuple = [8, 5, 4, 2]
__UpperCAmelCase : str = [3.0, 6.0, 12.0, 24.0]
__UpperCAmelCase : str = 48000
__UpperCAmelCase : List[Any] = 2
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = '''time_group_norm'''
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[int] = 1.0
__UpperCAmelCase : Optional[int] = 0.0_1
else:
raise ValueError(f"Unknown model name: {model_name}" )
__UpperCAmelCase : str = EncodecModel(lowercase_ )
__UpperCAmelCase : Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowercase_ )
__UpperCAmelCase : int = torch.load(lowercase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__UpperCAmelCase : Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(lowercase_ , lowercase_ , lowercase_ )
model.save_pretrained(lowercase_ )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 710 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
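# For example, the name "mobilenet_v1_0.75_192" parses to a depth multiplier of 0.75 and a 192x192
# input, with 1001 labels: the 1000 ImageNet classes shifted by one to make room for "background".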
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
    convert_mobilenet_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
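    # ca_qpos_proj is intentionally skipped here; only the decoder layer 0 projection is renamed,
    # via the dedicated entries at the end of rename_keys below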
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = state_dict.pop(lowercase_ )
__UpperCAmelCase : Tuple = val
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__UpperCAmelCase : Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : List[str] = value
return new_state_dict
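# For example, "backbone.0.body.layer1.0.conv1.weight" is renamed to
# "backbone.conv_encoder.model.layer1.0.conv1.weight"; all other keys pass through unchanged.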
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : int = ''''''
if is_panoptic:
__UpperCAmelCase : List[Any] = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase : Dict = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__UpperCAmelCase : Dict = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Any = in_proj_weight[:256, :]
__UpperCAmelCase : Dict = in_proj_bias[:256]
__UpperCAmelCase : str = in_proj_weight[256:512, :]
__UpperCAmelCase : Any = in_proj_bias[256:512]
__UpperCAmelCase : List[str] = in_proj_weight[-256:, :]
__UpperCAmelCase : Tuple = in_proj_bias[-256:]
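# The 768-row in_proj matrix stacks the query, key and value projections for the 256-dim hidden
# size, so rows [:256], [256:512] and [-256:] populate q, k and v respectively.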
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__UpperCAmelCase : Dict = '''resnet101'''
if "dc5" in model_name:
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[Any] = '''panoptic''' in model_name
if is_panoptic:
__UpperCAmelCase : Optional[int] = 250
else:
__UpperCAmelCase : Dict = 91
__UpperCAmelCase : Optional[Any] = '''huggingface/label-files'''
__UpperCAmelCase : int = '''coco-detection-id2label.json'''
__UpperCAmelCase : Tuple = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : Optional[Any] = {int(lowercase_ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = idalabel
__UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# load image processor
__UpperCAmelCase : Any = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__UpperCAmelCase : str = ConditionalDetrImageProcessor(format=lowercase_ )
# prepare image
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Optional[int] = image_processor(images=lowercase_ , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = encoding['''pixel_values''']
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
__UpperCAmelCase : Optional[Any] = torch.hub.load('''DeppMeng/ConditionalDETR''' , lowercase_ , pretrained=lowercase_ ).eval()
__UpperCAmelCase : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__UpperCAmelCase : str = '''conditional_detr.''' + src
rename_key(lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Any = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ , is_panoptic=lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase : Optional[int] = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__UpperCAmelCase : Union[str, Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase : Tuple = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__UpperCAmelCase : Dict = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__UpperCAmelCase : List[Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : str = ConditionalDetrForSegmentation(lowercase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
model.push_to_hub(repo_id=lowercase_ , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__UpperCAmelCase : List[Any] = conditional_detr(lowercase_ )
__UpperCAmelCase : int = model(lowercase_ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 711 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
__UpperCAmelCase : Optional[int] = gray_code_sequence_string(lowercase_ )
    # convert the generated strings to integers
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = int(sequence[i] , 2 )
return sequence
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCAmelCase : Union[str, Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCAmelCase : Any = gray_code_sequence_string(bit_count - 1 )
__UpperCAmelCase : Optional[Any] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCAmelCase : Any = '''0''' + smaller_sequence[i]
sequence.append(lowercase_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCAmelCase : List[Any] = '''1''' + smaller_sequence[i]
sequence.append(lowercase_ )
return sequence
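# Worked example (added; assumes the integer-returning wrapper above is the public
# gray_code_sequence entry point): for bit_count = 2 the recursion builds
# ["0", "1"] -> ["00", "01", "11", "10"], which converts to the Gray code [0, 1, 3, 2].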
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be non-negative''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
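# Worked example (added): for a = 25 (0b11001) and b = 32 (0b100000) the operands are
# zero-filled to "011001" and "100000", so the column-wise OR yields "0b111001", i.e. 57.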
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=4 , lowercase__="gelu" , lowercase__=0.0 , lowercase__=0.1 , lowercase__=True , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : str = intermediate_multiple_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout
__UpperCAmelCase : Any = attention_dropout
__UpperCAmelCase : Optional[int] = weight_tying
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : Tuple = scope
def A( self):
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def A( self):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self):
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = GPTNeoXJapaneseModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[str] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Union[str, Any] = GPTNeoXJapaneseModel(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = GPTNeoXJapaneseForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : List[Any] = GPTNeoXJapaneseForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
# first forward pass
__UpperCAmelCase : Dict = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__UpperCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : str = torch.cat([input_mask, next_mask] , dim=-1)
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , output_hidden_states=lowercase__)
__UpperCAmelCase : Optional[Any] = output_from_no_past['''hidden_states'''][0]
__UpperCAmelCase : int = model(
lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )['''hidden_states'''][0]
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self):
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : List[str] = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Optional[Any] = False
def A( self):
__UpperCAmelCase : Any = GPTNeoXJapaneseModelTester(self)
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__)
def A( self):
# This regression test was failing with PyTorch < 1.3
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
__UpperCAmelCase : str = None
self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase__ , lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : List[str] = '''abeja/gpt-neox-japanese-2.7b'''
__UpperCAmelCase : Tuple = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__UpperCAmelCase : List[str] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__UpperCAmelCase : List[Any] = GPTNeoXJapaneseTokenizer.from_pretrained(lowercase__)
__UpperCAmelCase : List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowercase__)
__UpperCAmelCase : Tuple = []
for prompt in prompts:
__UpperCAmelCase : Union[str, Any] = tokenizer(lowercase__ , return_tensors='''pt''').input_ids
__UpperCAmelCase : Optional[int] = model.generate(lowercase__ , max_length=5_0)
__UpperCAmelCase : int = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
predicted_outputs += generated_string
self.assertListEqual(lowercase__ , lowercase__)
| 713 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
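# Worked example (added): generate_key("THE GERMAN ATTACK", "SECRET") repeats the key
# cyclically until it matches the 17-character message, producing "SECRETSECRETSECRE".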
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
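# Note (added): each np.histogram call above bins pixel values over [0, num_labels - 1]
# (the obfuscated bins argument is presumably num_labels), so the returned arrays hold
# per-class pixel counts: the intersection, union, prediction and ground-truth areas of
# every class, which total_intersect_and_union then accumulates across images.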
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCAmelCase = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
lowerCAmelCase = None
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=lowercase_ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=lowercase_ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__UpperCAmelCase : Optional[Any] = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
def remove_articles(lowercase_ ):
return ARTICLES_REGEX.sub(''' ''' , lowercase_ )
def white_space_fix(lowercase_ ):
return " ".join(text.split() )
def remove_punc(lowercase_ ):
__UpperCAmelCase : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
if not s:
return []
return normalize_answer(lowercase_ ).split()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
return int(normalize_answer(lowercase_ ) == normalize_answer(lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = get_tokens(lowercase_ )
__UpperCAmelCase : Optional[Any] = get_tokens(lowercase_ )
__UpperCAmelCase : Union[str, Any] = collections.Counter(lowercase_ ) & collections.Counter(lowercase_ )
__UpperCAmelCase : List[Any] = sum(common.values() )
if len(lowercase_ ) == 0 or len(lowercase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__UpperCAmelCase : str = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : Optional[int] = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
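# Worked example (added): gold "blue whale" vs. prediction "blue shark" share one token,
# so precision = recall = 1/2 and F1 = 2 * (1/2) * (1/2) / ((1/2) + (1/2)) = 0.5.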
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__UpperCAmelCase : List[Any] = qa['''id''']
__UpperCAmelCase : Optional[int] = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowercase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__UpperCAmelCase : Union[str, Any] = ['''''']
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
__UpperCAmelCase : str = preds[qid]
# Take max over all gold answers
__UpperCAmelCase : Tuple = max(compute_exact(lowercase_ , lowercase_ ) for a in gold_answers )
__UpperCAmelCase : str = max(compute_fa(lowercase_ , lowercase_ ) for a in gold_answers )
return exact_scores, fa_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = {}
for qid, s in scores.items():
__UpperCAmelCase : Tuple = na_probs[qid] > na_prob_thresh
if pred_na:
__UpperCAmelCase : Union[str, Any] = float(not qid_to_has_ans[qid] )
else:
__UpperCAmelCase : List[str] = s
return new_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None ) -> int:
'''simple docstring'''
if not qid_list:
__UpperCAmelCase : int = len(lowercase_ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__UpperCAmelCase : int = len(lowercase_ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
__UpperCAmelCase : Optional[int] = new_eval[k]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
plt.step(lowercase_ , lowercase_ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(lowercase_ , lowercase_ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(lowercase_ )
plt.savefig(lowercase_ )
plt.clf()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] )
__UpperCAmelCase : Union[str, Any] = 0.0
__UpperCAmelCase : int = 1.0
__UpperCAmelCase : str = 0.0
__UpperCAmelCase : str = [1.0]
__UpperCAmelCase : str = [0.0]
__UpperCAmelCase : Union[str, Any] = 0.0
for i, qid in enumerate(lowercase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__UpperCAmelCase : Dict = true_pos / float(i + 1 )
__UpperCAmelCase : List[Any] = true_pos / float(lowercase_ )
if i == len(lowercase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase_ )
recalls.append(lowercase_ )
if out_image:
plot_pr_curve(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return {"ap": 100.0 * avg_prec}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
if out_image_dir and not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
__UpperCAmelCase : str = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__UpperCAmelCase : Tuple = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__UpperCAmelCase : Dict = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__UpperCAmelCase : List[str] = {k: float(lowercase_ ) for k, v in qid_to_has_ans.items()}
__UpperCAmelCase : List[str] = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(lowercase_ , lowercase_ , '''pr_exact''' )
merge_eval(lowercase_ , lowercase_ , '''pr_f1''' )
merge_eval(lowercase_ , lowercase_ , '''pr_oracle''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
if not qid_list:
return
__UpperCAmelCase : Tuple = [na_probs[k] for k in qid_list]
__UpperCAmelCase : str = np.ones_like(lowercase_ ) / float(len(lowercase_ ) )
plt.hist(lowercase_ , weights=lowercase_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(lowercase_ , f"na_prob_hist_{name}.png" ) )
plt.clf()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__UpperCAmelCase : Any = num_no_ans
__UpperCAmelCase : Optional[Any] = cur_score
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] )
for i, qid in enumerate(lowercase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__UpperCAmelCase : Tuple = scores[qid]
else:
if preds[qid]:
__UpperCAmelCase : Optional[Any] = -1
else:
__UpperCAmelCase : str = 0
cur_score += diff
if cur_score > best_score:
__UpperCAmelCase : Optional[int] = cur_score
__UpperCAmelCase : int = na_probs[qid]
return 100.0 * best_score / len(lowercase_ ), best_thresh
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Optional[int] = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : int = best_exact
__UpperCAmelCase : List[Any] = exact_thresh
__UpperCAmelCase : Union[str, Any] = best_fa
__UpperCAmelCase : Optional[Any] = fa_thresh
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
with open(OPTS.data_file ) as f:
__UpperCAmelCase : List[Any] = json.load(lowercase_ )
__UpperCAmelCase : List[Any] = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__UpperCAmelCase : str = json.load(lowercase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__UpperCAmelCase : Optional[int] = json.load(lowercase_ )
else:
__UpperCAmelCase : List[str] = {k: 0.0 for k in preds}
__UpperCAmelCase : int = make_qid_to_has_ans(lowercase_ ) # maps qid to True/False
__UpperCAmelCase : int = [k for k, v in qid_to_has_ans.items() if v]
__UpperCAmelCase : List[Any] = [k for k, v in qid_to_has_ans.items() if not v]
__UpperCAmelCase : Dict = get_raw_scores(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh )
__UpperCAmelCase : Optional[Any] = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh )
__UpperCAmelCase : Any = make_eval_dict(lowercase_ , lowercase_ )
if has_ans_qids:
__UpperCAmelCase : Tuple = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ )
merge_eval(lowercase_ , lowercase_ , '''HasAns''' )
if no_ans_qids:
__UpperCAmelCase : List[str] = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ )
merge_eval(lowercase_ , lowercase_ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , OPTS.out_image_dir )
histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
else:
print(json.dumps(lowercase_ , indent=2 ) )
if __name__ == "__main__":
lowerCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 715 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
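# Note (added): the hash of a window is sum(ord(c) * 256**(p_len - 1 - k)) mod 1_000_003,
# built by Horner's rule in the first loop. Sliding the window one step right removes the
# leading character's ord(c) * 256**(p_len - 1) term, multiplies by 256 and adds the
# incoming character, which is exactly the update performed above.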
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''mra'''
def __init__( self , lowercase__=5_0_2_6_5 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1 , lowercase__=0.0_2 , lowercase__=1e-5 , lowercase__="absolute" , lowercase__=4 , lowercase__="full" , lowercase__=0 , lowercase__=0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = position_embedding_type
__UpperCAmelCase : List[str] = block_per_row
__UpperCAmelCase : Optional[int] = approx_mode
__UpperCAmelCase : List[str] = initial_prior_first_n_blocks
__UpperCAmelCase : List[Any] = initial_prior_diagonal_n_blocks
| 716 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
__UpperCAmelCase : str = ValueError('''a should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
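# Worked example (added): hamming(10) is expected to yield the first ten 5-smooth numbers
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].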
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ = 1000000 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
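# Worked example (added): for limit = 8 the sieve gives phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# which sums to 21, the number of reduced proper fractions with denominator <= 8.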
if __name__ == "__main__":
print(solution())
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = 0
if start < end:
__UpperCAmelCase : List[str] = randint(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = a[end]
__UpperCAmelCase : int = a[pivot]
__UpperCAmelCase : List[Any] = temp
__UpperCAmelCase : Dict = _in_place_partition(lowercase_ , lowercase_ , lowercase_ )
count += _in_place_quick_sort(lowercase_ , lowercase_ , p - 1 )
count += _in_place_quick_sort(lowercase_ , p + 1 , lowercase_ )
return count
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : int = randint(lowercase_ , lowercase_ )
__UpperCAmelCase : Dict = a[end]
__UpperCAmelCase : Any = a[pivot]
__UpperCAmelCase : Optional[int] = temp
__UpperCAmelCase : List[str] = start - 1
for index in range(lowercase_ , lowercase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__UpperCAmelCase : Optional[int] = new_pivot_index + 1
__UpperCAmelCase : Union[str, Any] = a[new_pivot_index]
__UpperCAmelCase : Optional[int] = a[index]
__UpperCAmelCase : Optional[int] = temp
__UpperCAmelCase : Tuple = a[new_pivot_index + 1]
__UpperCAmelCase : Optional[Any] = a[end]
__UpperCAmelCase : Optional[int] = temp
return new_pivot_index + 1, count
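# Note (added): the partition helper returns both the pivot's final position and the
# number of comparisons it made; every element left of that position is strictly smaller
# than the pivot, and the quick-sort driver above sums the comparison counts across the
# recursive calls on the two sub-ranges.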
lowerCAmelCase = TemporaryFile()
lowerCAmelCase = 100 # 100 elements are to be sorted
lowerCAmelCase ,lowerCAmelCase = 0, 1 # mean and standard deviation
lowerCAmelCase = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase = np.load(outfile)
lowerCAmelCase = len(M) - 1
lowerCAmelCase = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 0 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase = """naver-clova-ix/donut-base"""
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : List[str] = DonutProcessor.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase : Dict = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
__UpperCAmelCase : Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
__UpperCAmelCase : Dict = self.processor.tokenajson(lowercase__)
self.assertDictEqual(lowercase__ , lowercase__)
| 719 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
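# Worked example (added): find_negative_index([4, 3, 2, -1]) returns 3 (the index of the
# first negative entry), while find_negative_index([1, 0]) returns 2, the array length,
# since the row contains no negatives.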
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase : List[str] = None
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ):
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , )
__UpperCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , lowercase__) != add_prefix_space:
__UpperCAmelCase : List[Any] = getattr(lowercase__ , pre_tok_state.pop('''type'''))
__UpperCAmelCase : str = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**lowercase__)
__UpperCAmelCase : Optional[int] = add_prefix_space
def A( self , *lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = kwargs.get('''is_split_into_words''' , lowercase__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
''' pretokenized inputs.''')
return super()._batch_encode_plus(*lowercase__ , **lowercase__)
def A( self , *lowercase__ , **lowercase__):
__UpperCAmelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , lowercase__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
''' pretokenized inputs.''')
return super()._encode_plus(*lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__)
return tuple(lowercase__)
def A( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__) + [self.eos_token_id])
if len(lowercase__) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
return input_ids
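# The conversation-building method above appends the EOS id after every turn
# and then keeps only the most recent `model_max_length` ids. A self-contained
# sketch of that sliding-window truncation (the toy character-level encoder
# below is a stand-in, not the real tokenizer):
def build_conversation_input_ids_sketch(turns, encode, eos_token_id, model_max_length):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]  # drop the oldest tokens first
    return input_ids


ids = build_conversation_input_ids_sketch(["hi", "hello"], lambda t: [ord(c) for c in t], 0, 4)
assert len(ids) == 4 and ids[-1] == 0  # truncated from the left, final EOS kept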
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
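# `_LazyModule` defers the actual submodule import until one of its exported
# names is first accessed, which keeps top-level package imports cheap. A
# minimal sketch of the pattern (not the transformers implementation):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value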
| 675 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = RoCBertTokenizer
_lowerCAmelCase : Any = None
_lowerCAmelCase : int = False
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[int] = filter_non_english
def A( self):
super().setUp()
__UpperCAmelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : Union[str, Any] = {}
for i, value in enumerate(lowercase__):
__UpperCAmelCase : Dict = i
__UpperCAmelCase : Dict = i
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''])
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
with open(self.word_shape_file , '''w''' , encoding='''utf-8''') as word_shape_writer:
json.dump(lowercase__ , lowercase__ , ensure_ascii=lowercase__)
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''') as word_pronunciation_writer:
json.dump(lowercase__ , lowercase__ , ensure_ascii=lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__UpperCAmelCase : Tuple = tokenizer.tokenize('''你好[SEP]你是谁''')
self.assertListEqual(lowercase__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowercase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowercase__) , [5, 6, 2, 5, 7, 8])
def A( self):
__UpperCAmelCase : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
def A( self):
__UpperCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo'''])
def A( self):
__UpperCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def A( self):
__UpperCAmelCase : Any = RoCBertBasicTokenizer(do_lower_case=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : Any = RoCBertBasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A( self):
__UpperCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=lowercase__ , never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def A( self):
__UpperCAmelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__UpperCAmelCase : Dict = {}
for i, token in enumerate(lowercase__):
__UpperCAmelCase : List[Any] = i
__UpperCAmelCase : Optional[Any] = RoCBertWordpieceTokenizer(vocab=lowercase__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing'''])
def A( self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def A( self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def A( self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def A( self):
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase__) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
if self.test_rust_tokenizer:
__UpperCAmelCase : Any = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase__) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
def A( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
__UpperCAmelCase : Union[str, Any] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__UpperCAmelCase : int = tokenizer_r.encode_plus(
lowercase__ , return_attention_mask=lowercase__ , return_token_type_ids=lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ , )
__UpperCAmelCase : Dict = tokenizer_r.do_lower_case if hasattr(lowercase__ , '''do_lower_case''') else False
__UpperCAmelCase : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''])
def A( self):
__UpperCAmelCase : Tuple = ['''的''', '''人''', '''有''']
__UpperCAmelCase : List[Any] = ''''''.join(lowercase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : int = True
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
__UpperCAmelCase : int = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(lowercase__)
__UpperCAmelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(lowercase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
__UpperCAmelCase : int = False
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
__UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
__UpperCAmelCase : Optional[Any] = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : Tuple = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(lowercase__)
__UpperCAmelCase : str = tokenizer_p.convert_ids_to_tokens(lowercase__)
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCAmelCase : Any = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(lowercase__)
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
@slow
def A( self):
__UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__UpperCAmelCase : Any = tokenizer.encode('''你好''' , add_special_tokens=lowercase__)
__UpperCAmelCase : Union[str, Any] = tokenizer.encode('''你是谁''' , add_special_tokens=lowercase__)
__UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase__)
__UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A( self):
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=lowercase__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__UpperCAmelCase : int = '''你好,你是谁'''
__UpperCAmelCase : List[str] = tokenizer.tokenize(lowercase__)
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(lowercase__)
__UpperCAmelCase : str = tokenizer.convert_tokens_to_shape_ids(lowercase__)
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_pronunciation_ids(lowercase__)
__UpperCAmelCase : Any = tokenizer.prepare_for_model(
lowercase__ , lowercase__ , lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : List[str] = tokenizer.encode_plus(lowercase__ , add_special_tokens=lowercase__)
self.assertEqual(lowercase__ , lowercase__)
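# The slow special-tokens test earlier in this class asserts the [CLS]/[SEP]
# layout against the toy vocab from setUp ([CLS]=1, [SEP]=2, 你=5, 好=6, 是=7,
# 谁=8). A standalone sketch of that layout, with illustrative names:
def build_inputs_with_special_tokens_sketch(ids_a, ids_b=None, cls_id=1, sep_id=2):
    """[CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


assert build_inputs_with_special_tokens_sketch([5, 6]) == [1, 5, 6, 2]
assert build_inputs_with_special_tokens_sketch([5, 6], [5, 7, 8]) == [1, 5, 6, 2, 5, 7, 8, 2]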
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
        # create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
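# The batch-generation test above sets the tokenizer's padding side to "left"
# because, for a decoder-only model like BioGPT, generation continues from the
# last position of each row, which must be a real token. A toy sketch of left
# padding (the ids and pad value are illustrative):
def left_pad_sketch(batch, pad_id):
    width = max(len(seq) for seq in batch)
    padded, mask = [], []
    for seq in batch:
        n_pad = width - len(seq)
        padded.append([pad_id] * n_pad + list(seq))
        mask.append([0] * n_pad + [1] * len(seq))
    return padded, mask


ids, mask = left_pad_sketch([[5, 6, 7], [9]], pad_id=0)
assert ids == [[5, 6, 7], [0, 0, 9]] and mask == [[1, 1, 1], [0, 0, 1]]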
| 675 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = IFPipeline
__UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _a ( self) -> Optional[Any]:
return self._get_dummy_components()
def _a ( self , lowercase_ , lowercase_=0) -> Union[str, Any]:
if str(lowercase_).startswith('mps'):
__snake_case = torch.manual_seed(lowercase_)
else:
__snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_)
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _a ( self) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _a ( self) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def _a ( self) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def _a ( self) -> Dict:
self._test_save_load_local()
def _a ( self) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def _a ( self) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self) -> Any:
# if
__snake_case = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa)
__snake_case = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=lowercase_ , tokenizer=lowercase_)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda')
__snake_case , __snake_case = pipe_a.encode_prompt('anime turtle' , device='cuda')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__snake_case = None
__snake_case = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(lowercase_ , lowercase_ , lowercase_ , lowercase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__snake_case = IFImgaImgPipeline(**pipe_a.components)
__snake_case = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(lowercase_ , lowercase_ , lowercase_ , lowercase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__snake_case = IFInpaintingPipeline(**pipe_a.components)
__snake_case = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(lowercase_ , lowercase_ , lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowercase_)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowercase_)
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowercase_)
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowercase_)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowercase_)
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1)).to(lowercase_)
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
# pipeline 2
_start_torch_memory_measurement()
__snake_case = torch.Generator(device='cpu').manual_seed(0)
__snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowercase_)
__snake_case = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowercase_)
__snake_case = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1)).to(lowercase_)
__snake_case = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
__snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
assert_mean_pixel_difference(lowercase_ , lowercase_)
def A ( ) -> Optional[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
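# `_start_torch_memory_measurement` above resets the CUDA allocator statistics
# so each pipeline stage's `torch.cuda.max_memory_allocated()` assertion sees a
# fresh peak. A minimal sketch of the same measure-the-peak pattern:
import torch


def peak_cuda_memory_sketch(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()


if torch.cuda.is_available():
    peak = peak_cuda_memory_sketch(lambda: torch.zeros(1024, 1024, device="cuda"))
    print(f"peak bytes during the call: {peak}")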
| 676 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
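# The guidance step inside the denoising loop above is plain classifier-free
# guidance: move past the unconditional prediction toward the text-conditioned
# one. A tensor-level sketch of that single line, independent of the pipeline:
import torch


def classifier_free_guidance_sketch(noise_uncond, noise_text, guidance_scale):
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)


u, t = torch.zeros(2, 4), torch.ones(2, 4)
assert torch.equal(classifier_free_guidance_sketch(u, t, 1.0), t)        # scale 1 -> no guidance
assert torch.equal(classifier_free_guidance_sketch(u, t, 7.5), 7.5 * t)  # scale > 1 amplifies the text direction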
| 676 | 1 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __lowercase :
def __init__( self , lowercase_) -> Optional[Any]:
if isinstance(lowercase_ , lowercase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__snake_case = deepcopy(lowercase_)
elif os.path.exists(lowercase_):
with io.open(lowercase_ , 'r' , encoding='utf-8') as f:
__snake_case = json.load(lowercase_)
else:
try:
__snake_case = baseaa.urlsafe_baadecode(lowercase_).decode('utf-8')
__snake_case = json.loads(lowercase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}")
__snake_case = config
self.set_stage_and_offload()
def _a ( self) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__snake_case = self.get_value('zero_optimization.stage' , -1)
# offload
__snake_case = False
if self.is_zeroa() or self.is_zeroa():
__snake_case = set(['cpu', 'nvme'])
__snake_case = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
__snake_case = True
def _a ( self , lowercase_) -> Optional[int]:
__snake_case = self.config
# find the config node of interest if it exists
__snake_case = ds_key_long.split('.')
__snake_case = nodes.pop()
for node in nodes:
__snake_case = config.get(lowercase_)
if config is None:
return None, ds_key
return config, ds_key
def _a ( self , lowercase_ , lowercase_=None) -> Dict:
__snake_case , __snake_case = self.find_config_node(lowercase_)
if config is None:
return default
return config.get(lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_=False) -> Optional[Any]:
__snake_case = self.config
# find the config node of interest if it exists
__snake_case = ds_key_long.split('.')
for node in nodes:
__snake_case = config
__snake_case = config.get(lowercase_)
if config is None:
if must_exist:
raise ValueError(F"Can't find {ds_key_long} entry in the config: {self.config}")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase_)
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = self.get_value(lowercase_)
return False if value is None else bool(lowercase_)
def _a ( self , lowercase_) -> List[Any]:
__snake_case = self.get_value(lowercase_)
return False if value is None else not bool(lowercase_)
def _a ( self) -> Optional[int]:
return self._stage == 2
def _a ( self) -> Optional[int]:
return self._stage == 3
def _a ( self) -> int:
return self._offload
class __lowercase :
def __init__( self , lowercase_) -> List[str]:
__snake_case = engine
def _a ( self , lowercase_ , **lowercase_) -> int:
# runs backpropagation and handles mixed precision
self.engine.backward(lowercase_ , **lowercase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> Dict:
super().__init__(lowercase_ , device_placement=lowercase_ , scaler=lowercase_)
__snake_case = hasattr(self.optimizer , 'overflow')
def _a ( self , lowercase_=None) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _a ( self) -> int:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _a ( self) -> List[Any]:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_) -> Optional[Any]:
super().__init__(lowercase_ , lowercase_)
def _a ( self) -> Union[str, Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __lowercase :
def __init__( self , lowercase_ , lowercase_=0.001 , lowercase_=0 , **lowercase_) -> Dict:
__snake_case = params
__snake_case = lr
__snake_case = weight_decay
__snake_case = kwargs
class __lowercase :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=0 , **lowercase_) -> Dict:
__snake_case = optimizer
__snake_case = total_num_steps
__snake_case = warmup_num_steps
__snake_case = kwargs
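# `find_config_node` / `get_value` above resolve dotted keys such as
# "zero_optimization.offload_param.device" against the nested DeepSpeed config.
# A standalone sketch of that dotted-key lookup:
def get_value_sketch(config, ds_key_long, default=None):
    node = config
    for key in ds_key_long.split("."):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node


cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value_sketch(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_value_sketch(cfg, "zero_optimization.offload_optimizer.device", "none") == "none"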
| 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
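# Illustrative call pattern (object names are assumptions, not defined in
# this snippet): with a post_process_function that maps generated ids back
# to text and a compute_metrics that scores them, evaluation reduces to:
#   metrics = trainer.evaluate(max_length=64, num_beams=4)
#   results = trainer.predict(test_dataset, test_examples, num_beams=4)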
| 676 | 1 |
from __future__ import annotations
def A ( snake_case__ : list[float] ) -> bool:
'''simple docstring'''
if len(snake_case__ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
__snake_case = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
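# Worked example for the check above (named `A` in this dump): side lengths
# close into a polygon only if the longest side is strictly shorter than
# the sum of the others.
#   A([3.0, 4.0, 5.0])  # True,  since 5 < 3 + 4
#   A([1.0, 1.0, 3.0])  # False, since 3 >= 1 + 1 (degenerate)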
| 676 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
__snake_case = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we cannot expand any further
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
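# Heuristic note (sketch): the map built above is the Manhattan distance to
# the goal; for cell (0, 0) with goal (4, 5) the estimate is
# |0 - 4| + |0 - 5| = 9. Obstacle cells get an extra penalty of 99 so the
# expansion is steered around them.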
| 676 | 1 |
UpperCAmelCase__ : List[str] = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
UpperCAmelCase__ : int = frozenset(["prompt", "negative_prompt"])
UpperCAmelCase__ : Optional[Any] = frozenset([])
UpperCAmelCase__ : Any = frozenset(["image"])
UpperCAmelCase__ : List[str] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
UpperCAmelCase__ : int = frozenset(["image"])
UpperCAmelCase__ : List[Any] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
UpperCAmelCase__ : Any = frozenset(["prompt", "image", "negative_prompt"])
UpperCAmelCase__ : Dict = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
UpperCAmelCase__ : Any = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
UpperCAmelCase__ : List[Any] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
UpperCAmelCase__ : int = frozenset(["image", "mask_image"])
UpperCAmelCase__ : str = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
UpperCAmelCase__ : Union[str, Any] = frozenset(["example_image", "image", "mask_image"])
UpperCAmelCase__ : str = frozenset(["class_labels"])
UpperCAmelCase__ : Optional[Any] = frozenset(["class_labels"])
UpperCAmelCase__ : List[str] = frozenset(["batch_size"])
UpperCAmelCase__ : List[Any] = frozenset([])
UpperCAmelCase__ : Optional[Any] = frozenset(["batch_size"])
UpperCAmelCase__ : str = frozenset([])
UpperCAmelCase__ : List[Any] = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
UpperCAmelCase__ : Optional[int] = frozenset(["prompt", "negative_prompt"])
UpperCAmelCase__ : Tuple = frozenset(["input_tokens"])
UpperCAmelCase__ : List[str] = frozenset(["input_tokens"])
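# Hypothetical helper (not part of the original file) showing how such
# parameter sets can be used: a call is well-formed when every required
# name is present and nothing falls outside the accepted set.
def _check_call_params(kwargs, accepted, required):
    # `kwargs` is a dict of call arguments; `accepted` and `required` are
    # frozensets of parameter names like the ones defined above.
    names = frozenset(kwargs)
    return required <= names <= accepted

# e.g., with `accepted` bound to the first frozenset above and `required`
# to the second:
#   _check_call_params({'prompt': 'a cat', 'negative_prompt': ''}, accepted, required)  # True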
| 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str(Path('..') / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ : Any = 16
UpperCAmelCase__ : List[str] = 32
def A ( snake_case__ : Accelerator , snake_case__ : int = 16 ) -> Optional[int]:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
__snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
__snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__snake_case = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__snake_case = 16
elif accelerator.mixed_precision != "no":
__snake_case = 8
else:
__snake_case = None
return tokenizer.pad(
snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , )
# Instantiate dataloaders.
__snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
__snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ : Any = mocked_dataloaders # noqa: F811
def A ( snake_case__ : Any , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , snake_case__ ) == "1":
__snake_case = 2
# New Code #
__snake_case = int(args.gradient_accumulation_steps )
# Initialize accelerator
__snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case = config['lr']
__snake_case = int(config['num_epochs'] )
__snake_case = int(config['seed'] )
__snake_case = int(config['batch_size'] )
__snake_case = evaluate.load('glue' , 'mrpc' )
set_seed(snake_case__ )
__snake_case , __snake_case = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__snake_case = model.to(accelerator.device )
# Instantiate optimizer
__snake_case = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
__snake_case = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
__snake_case = model(**snake_case__ )
__snake_case = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__snake_case = model(**snake_case__ )
__snake_case = outputs.logits.argmax(dim=-1 )
__snake_case , __snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
__snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , snake_case__ )
def A ( ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=snake_case__ , default=snake_case__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=snake_case__ , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__snake_case = parser.parse_args()
__snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
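# Example invocations (the script name and values are illustrative):
#   python gradient_accumulation.py --gradient_accumulation_steps 4
#   accelerate launch gradient_accumulation.py \
#       --mixed_precision fp16 --gradient_accumulation_steps 2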
| 676 |
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations(snake_case__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
snake_case__ : int , snake_case__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__snake_case = answer
return answer
__snake_case = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = [0] * (target + 1)
__snake_case = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
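# Equivalent top-down sketch (not in the original file): replace the manual
# dp_array with functools.lru_cache. For array=[1, 2, 5] and target=5 all
# three implementations above, and this one, count 9 ordered combinations.
from functools import lru_cache

def combination_sum_iv_cached(array, target):
    @lru_cache(maxsize=None)
    def count(remaining):
        # Number of ordered ways to reach `remaining` using items of `array`.
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)
    return count(target)

assert combination_sum_iv_cached([1, 2, 5], 5) == 9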
| 676 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=1_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Dict:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> int:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> str:
__snake_case = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase_ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = EsmForProteinFolding(config=lowercase_).float()
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3))
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
def _a ( self) -> Any:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = False
__UpperCAmelCase = (EsmForProteinFolding,) if is_torch_available() else ()
__UpperCAmelCase = ()
__UpperCAmelCase = {} if is_torch_available() else {}
__UpperCAmelCase = False
def _a ( self) -> int:
__snake_case = EsmFoldModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> Any:
self.config_tester.run_common_tests()
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@unittest.skip('Does not support attention outputs')
def _a ( self) -> str:
pass
@unittest.skip
def _a ( self) -> Any:
pass
@unittest.skip('Esm does not support embedding resizing')
def _a ( self) -> str:
pass
@unittest.skip('Esm does not support embedding resizing')
def _a ( self) -> Any:
pass
@unittest.skip('ESMFold does not support passing input embeds!')
def _a ( self) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.')
def _a ( self) -> Optional[int]:
pass
@unittest.skip('ESMFold does not support head pruning.')
def _a ( self) -> Optional[int]:
pass
@unittest.skip('ESMFold does not support head pruning.')
def _a ( self) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.')
def _a ( self) -> List[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.')
def _a ( self) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.')
def _a ( self) -> Any:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.')
def _a ( self) -> List[Any]:
pass
@unittest.skip('ESMFold only has one output format.')
def _a ( self) -> List[Any]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
def _a ( self) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support input chunking.')
def _a ( self) -> List[str]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
def _a ( self) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def _a ( self) -> List[str]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def _a ( self) -> int:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def _a ( self) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.')
def _a ( self) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _a ( self) -> Union[str, Any]:
pass
@require_torch
class __lowercase ( lowerCamelCase__ ):
@slow
def _a ( self) -> List[Any]:
__snake_case = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
model.eval()
__snake_case = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
__snake_case = model(lowercase_)['positions']
__snake_case = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa)
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase_ , atol=1e-4))
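# Shape note (a reading of the checks above, not authoritative): the folding
# head returns one coordinate set per structure-module iteration (8 here),
# with 14 atom slots per residue in the atom14 representation, each holding
# (x, y, z); hence positions.shape == (8, batch, seq_len, 14, 3).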
| 676 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 1 |
from collections.abc import Callable
import numpy as np
def A ( snake_case__ : Callable , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> np.ndarray:
'''simple docstring'''
__snake_case = int(np.ceil((x_end - xa) / step_size ) )
__snake_case = np.zeros((n + 1,) )
__snake_case = ya
__snake_case = xa
for k in range(snake_case__ ):
__snake_case = y[k] + step_size * ode_func(snake_case__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class __lowercase ( enum.Enum ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
@add_end_docstrings(lowerCamelCase__ )
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''generated'''
def __init__( self , *lowercase_ , **lowercase_) -> int:
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def _a ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
__snake_case = {}
if truncation is not None:
__snake_case = truncation
__snake_case = generate_kwargs
__snake_case = {}
if return_tensors is not None and return_type is None:
__snake_case = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__snake_case = return_type
if clean_up_tokenization_spaces is not None:
__snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
__snake_case = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.')
__snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Dict:
return True
def _a ( self , *lowercase_ , lowercase_) -> str:
__snake_case = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
__snake_case = ([prefix + arg for arg in args[0]],)
__snake_case = True
elif isinstance(args[0] , lowercase_):
__snake_case = (prefix + args[0],)
__snake_case = False
else:
raise ValueError(
F" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`")
__snake_case = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *lowercase_ , **lowercase_) -> Optional[int]:
__snake_case = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def _a ( self , lowercase_ , lowercase_=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_) -> str:
__snake_case = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def _a ( self , lowercase_ , **lowercase_) -> Any:
if self.framework == "pt":
__snake_case , __snake_case = model_inputs['input_ids'].shape
elif self.framework == "tf":
__snake_case , __snake_case = tf.shape(model_inputs['input_ids']).numpy()
__snake_case = generate_kwargs.get('min_length' , self.model.config.min_length)
__snake_case = generate_kwargs.get('max_length' , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs['min_length'] , generate_kwargs['max_length'])
__snake_case = self.model.generate(**lowercase_ , **lowercase_)
__snake_case = output_ids.shape[0]
if self.framework == "pt":
__snake_case = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
__snake_case = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def _a ( self , lowercase_ , lowercase_=ReturnType.TEXT , lowercase_=False) -> List[Any]:
__snake_case = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__snake_case = {F"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
__snake_case = {
F"{self.return_name}_text": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCamelCase__ )
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''summary'''
def __call__( self , *lowercase_ , **lowercase_) -> int:
return super().__call__(*lowercase_ , **lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> bool:
if max_length < min_length:
logger.warning(F"Your min_length={min_length} must be inferior than your max_length={max_length}.")
if input_length < max_length:
logger.warning(
F"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})")
@add_end_docstrings(lowerCamelCase__ )
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''translation'''
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Dict:
if input_length > 0.9 * max_length:
logger.warning(
F"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)')
return True
def _a ( self , *lowercase_ , lowercase_=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_=None , lowercase_=None) -> int:
if getattr(self.tokenizer , '_build_translation_inputs' , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def _a ( self , lowercase_=None , lowercase_=None , **lowercase_) -> Union[str, Any]:
__snake_case , __snake_case , __snake_case = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
__snake_case = src_lang
if tgt_lang is not None:
__snake_case = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__snake_case = kwargs.get('task' , self.task)
__snake_case = task.split('_')
if task and len(lowercase_) == 4:
# translation, XX, to YY
__snake_case = items[1]
__snake_case = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
return super().__call__(*lowercase_ , **lowercase_)
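# Task-string sketch: a task id such as "translation_en_to_fr" splits on
# '_' into ['translation', 'en', 'to', 'fr'], so _sanitize_parameters above
# recovers src_lang='en' and tgt_lang='fr'. Illustrative use:
#   translator = pipeline('translation_en_to_fr')
#   translator('How are you?', max_length=40)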
| 676 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 1 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
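# Worked examples for the check above (named `A` in this dump): it tests
# whether a number's square ends in the number itself (automorphic numbers).
#   A(5)   # True:  5**2 = 25 ends in 5
#   A(76)  # True:  76**2 = 5776 ends in 76
#   A(7)   # False: 7**2 = 49 does not end in 7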
| 676 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase = '''maskformer-swin'''
__UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=2_2_4 , lowercase_=4 , lowercase_=3 , lowercase_=9_6 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 1_2, 2_4] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[Any]:
super().__init__(**lowercase_)
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(lowercase_)
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(lowercase_) - 1))
__snake_case = ['stem'] + [F"stage{idx}" for idx in range(1 , len(lowercase_) + 1)]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names)
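# Instantiation sketch (using the presumed public name rather than the
# transformed one above, so the names are assumptions):
#   config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2],
#                                 num_heads=[3, 6, 12, 24])
#   config.hidden_size  # 96 * 2**3 == 768, the channel dim after the last stage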
| 676 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and the attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            __snake_case = embedding_type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
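# Illustration: the two `rope_scaling` configs the test above exercises. Field names follow
# the Transformers convention; the factor mirrors the value used in the test.
_LINEAR_ROPE = {"type": "linear", "factor": 10.0}  # positions divided by factor -> outputs always differ
_DYNAMIC_ROPE = {"type": "dynamic", "factor": 10.0}  # NTK-style: rescales only past the trained length,
# so short inputs match the unscaled model, as asserted above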
| 676 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class __lowercase :
def __init__( self) -> int:
__snake_case = {}
    def _a ( self , lowercase_ , lowercase_ , lowercase_=1) -> str:
        # check if u exists
        if self.graph.get(lowercase_):
            # if there already is an edge, skip the duplicate
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist, create it with this edge
            __snake_case = [[w, v]]
        # make sure v exists as a node, even with no outgoing edges yet
        if not self.graph.get(lowercase_):
            __snake_case = []
def _a ( self) -> int:
return list(self.graph)
def _a ( self , lowercase_ , lowercase_) -> Optional[Any]:
if self.graph.get(lowercase_):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> Optional[int]:
if s == d:
return []
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowercase_)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return visited
def _a ( self , lowercase_=-1) -> List[Any]:
if c == -1:
__snake_case = floor(random() * 1_0_0_0_0) + 1_0
for i in range(lowercase_):
            # every vertex gets a random number of outgoing edges (up to ~100)
for _ in range(floor(random() * 1_0_2) + 1):
__snake_case = floor(random() * c) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1)
def _a ( self , lowercase_=-2) -> List[str]:
__snake_case = deque()
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
d.append(lowercase_)
visited.append(lowercase_)
while d:
__snake_case = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def _a ( self , lowercase_) -> Any:
__snake_case = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _a ( self , lowercase_) -> Dict:
return len(self.graph[u])
def _a ( self , lowercase_=-2) -> Union[str, Any]:
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
__snake_case = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop())
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return sorted_nodes
def _a ( self) -> Optional[Any]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return list(lowercase_)
def _a ( self) -> List[Any]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return False
def _a ( self , lowercase_=-2 , lowercase_=-1) -> List[Any]:
__snake_case = time()
self.dfs(lowercase_ , lowercase_)
__snake_case = time()
return end - begin
def _a ( self , lowercase_=-2) -> Dict:
__snake_case = time()
self.bfs(lowercase_)
__snake_case = time()
return end - begin
class __lowercase :
def __init__( self) -> str:
__snake_case = {}
def _a ( self , lowercase_ , lowercase_ , lowercase_=1) -> str:
# check if the u exists
if self.graph.get(lowercase_):
            # if there already is an edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
__snake_case = [[w, v]]
# add the other way
if self.graph.get(lowercase_):
            # if there already is an edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
            # if v does not exist
__snake_case = [[w, u]]
def _a ( self , lowercase_ , lowercase_) -> Union[str, Any]:
if self.graph.get(lowercase_):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_)
# the other way round
if self.graph.get(lowercase_):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase_)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> Tuple:
if s == d:
return []
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowercase_)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return visited
def _a ( self , lowercase_=-1) -> Dict:
if c == -1:
__snake_case = floor(random() * 1_0_0_0_0) + 1_0
for i in range(lowercase_):
            # every vertex gets a random number of outgoing edges (up to ~100)
for _ in range(floor(random() * 1_0_2) + 1):
__snake_case = floor(random() * c) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1)
def _a ( self , lowercase_=-2) -> int:
__snake_case = deque()
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
d.append(lowercase_)
visited.append(lowercase_)
while d:
__snake_case = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def _a ( self , lowercase_) -> Union[str, Any]:
return len(self.graph[u])
def _a ( self) -> List[str]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return list(lowercase_)
def _a ( self) -> int:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return False
def _a ( self) -> Tuple:
return list(self.graph)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> int:
__snake_case = time()
self.dfs(lowercase_ , lowercase_)
__snake_case = time()
return end - begin
def _a ( self , lowercase_=-2) -> str:
__snake_case = time()
self.bfs(lowercase_)
__snake_case = time()
return end - begin
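# Minimal runnable sketch: the adjacency-list DFS/BFS pattern the classes above implement,
# shown on a plain dict so it does not depend on the obfuscated class names.
def _demo_traversals() -> None:
    graph = {1: [2, 3], 2: [4], 3: [4], 4: []}
    # iterative DFS with an explicit stack
    stack, seen = [1], []
    while stack:
        node = stack.pop()
        if node not in seen:
            seen.append(node)
            stack.extend(reversed(graph[node]))
    print("dfs:", seen)  # dfs: [1, 2, 4, 3]
    # BFS with a deque, mirroring the bfs methods above
    queue, visited = deque([1]), [1]
    while queue:
        node = queue.popleft()
        for nxt in graph[node]:
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    print("bfs:", visited)  # bfs: [1, 2, 3, 4]
if __name__ == "__main__":
    _demo_traversals()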
| 676 |
def A ( snake_case__ : int ) -> bool:
    '''Return True if ``number`` is automorphic, i.e. its square ends in its own digits (5 -> 25, 76 -> 5776).'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
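    # Illustration: automorphic numbers end their own square (5 -> 25, 76 -> 5776).
    print(A(5), A(76), A(7))  # True True False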
| 676 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( snake_case__ : Union[str, Any] ) -> Optional[Any]:
    '''Freeze all parameters of a module so they receive no gradient updates.'''
for param in module.parameters():
__snake_case = False
def A ( ) -> Optional[int]:
    '''Pick the compute device: MPS when built (with a caveat), else CUDA when available, else CPU.'''
__snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__snake_case = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def A ( snake_case__ : int ) -> Union[str, Any]:
    '''Display an image with matplotlib, hiding both axes.'''
__snake_case = plt.imshow(snake_case__ )
fig.axes.get_xaxis().set_visible(snake_case__ )
fig.axes.get_yaxis().set_visible(snake_case__ )
plt.show()
def A ( ) -> Optional[Any]:
    '''Return the current time formatted as HH:MM:SS.'''
__snake_case = datetime.now()
__snake_case = current_time.strftime('%H:%M:%S' )
return timestamp
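# Minimal sketch of the cuda/mps/cpu fallback the device helper above implements;
# note that MPS takes precedence when built, despite the backprop caveat it warns about.
def _pick_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    return device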
| 676 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
    '''Element-wise logistic sigmoid: 1 / (1 + e^-x).'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
    '''Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x).'''
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
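    # Illustration: sigmoid squashes into (0, 1); SiLU multiplies the input by its sigmoid.
    x = np.array([-2.0, 0.0, 2.0])
    print(1 / (1 + np.exp(-x)))        # ~[0.119, 0.5, 0.881]
    print(x * (1 / (1 + np.exp(-x))))  # SiLU: ~[-0.238, 0.0, 1.762]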
| 676 | 1 |
import functools
def A ( snake_case__ : str , snake_case__ : str ) -> int:
    '''Levenshtein edit distance between two strings, computed by memoized recursion.'''
__snake_case = len(snake_case__ )
__snake_case = len(snake_case__ )
@functools.cache
def min_distance(snake_case__ : int , snake_case__ : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__snake_case = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , snake_case__ ) , 1 + min_distance(snake_case__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
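    # Illustration: classic Levenshtein distances.
    print(A("kitten", "sitting"))  # 3
    print(A("flaw", "lawn"))       # 2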
| 676 |
def A ( snake_case__ : int ) -> bool:
    '''Lucas-Lehmer test: return True iff the Mersenne number 2**p - 1 is prime; raises ValueError for p < 2.'''
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
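    # Illustration: exponents p < 20 whose Mersenne number 2**p - 1 is prime.
    print([p for p in range(2, 20) if A(p)])  # [2, 3, 5, 7, 13, 17, 19]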
| 676 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **lowercase_) -> Optional[int]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__snake_case = deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_))
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}")
__snake_case = kwargs.pop('torchscript' , self.torchscript)
__snake_case = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics)
__snake_case = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level)
super().__init__(**lowercase_)
__UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Trace the models using torchscript'''} )
__UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
__UpperCAmelCase = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def _a ( self) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'])
logger.info('PyTorch: setting up devices')
if not self.cuda:
__snake_case = torch.device('cpu')
__snake_case = 0
elif is_torch_tpu_available():
__snake_case = xm.xla_device()
__snake_case = 0
else:
__snake_case = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
__snake_case = torch.cuda.device_count()
return device, n_gpu
@property
def _a ( self) -> str:
return is_torch_tpu_available() and self.tpu
@property
def _a ( self) -> int:
requires_backends(self , ['torch'])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _a ( self) -> "torch.device":
requires_backends(self , ['torch'])
return self._setup_devices[0]
@property
def _a ( self) -> Optional[int]:
requires_backends(self , ['torch'])
return self._setup_devices[1]
@property
def _a ( self) -> int:
return self.n_gpu > 0
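# Hedged usage sketch: how these arguments are consumed. The names below follow the public
# (now-deprecated) Transformers benchmark API rather than this file; kept as comments because
# this module relies on package-relative imports.
# from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# results = PyTorchBenchmark(args).run()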
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
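# Hedged usage sketch: the zero-shot pattern the exported classes support. The checkpoint id is
# illustrative and `image` stands for any PIL image; kept as comments since this is an __init__ module.
# from transformers import CLIPModel, CLIPProcessor
# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt", padding=True)
# probs = model(**inputs).logits_per_image.softmax(dim=1)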
| 676 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
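# Hedged usage sketch: Kandinsky samples in two stages -- the prior maps text to CLIP image
# embeddings and the decoder renders pixels. Repo ids are illustrative; kept as comments.
# prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# out = prior("a portrait of a fox")
# pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
# image = pipe("a portrait of a fox", image_embeds=out.image_embeds,
#              negative_image_embeds=out.negative_image_embeds).images[0]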
| 676 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase__ : Dict = get_tests_dir("fixtures")
UpperCAmelCase__ : Optional[int] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCAmelCase__ : str = get_tests_dir("fixtures/dummy-config.json")
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Union[str, Any]:
__snake_case = 0
def _a ( self) -> Optional[int]:
__snake_case = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_).to_dict()
config_dict.pop('feature_extractor_type')
__snake_case = WavaVecaFeatureExtractor(**lowercase_)
# save in new folder
model_config.save_pretrained(lowercase_)
config.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
# make sure private variable is not incorrectly saved
__snake_case = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> List[Any]:
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> List[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier'):
__snake_case = AutoFeatureExtractor.from_pretrained('bert-base')
def _a ( self) -> Tuple:
with self.assertRaisesRegex(
lowercase_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa')
def _a ( self) -> Tuple:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
def _a ( self) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_):
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_):
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
def _a ( self) -> Tuple:
try:
AutoConfig.register('custom' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = CustomFeatureExtractor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _a ( self) -> Optional[Any]:
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = True
try:
AutoConfig.register('custom' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# If remote code is not set, the default is to use local
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(not hasattr(lowercase_ , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
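# Hedged sketch: the registration pattern the tests above exercise, via the public auto API.
# AutoConfig.register("custom", CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# fe = AutoFeatureExtractor.from_pretrained("<path with a custom preprocessor_config.json>")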
| 676 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
    '''Build a Swin2SRConfig matching the model variant encoded in the checkpoint URL.'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
    '''Rename one key of the original Swin2SR state dict to its Transformers equivalent.'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
    '''Convert the original state dict, splitting fused qkv projections into query/key/value.'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
    '''Convert a Swin2SR checkpoint, verify outputs on a test image, and optionally save or push it.'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
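# Example invocation (script name and output path are illustrative):
# python convert_swin2sr_original_to_pytorch.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 --push_to_hub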
| 676 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase = 1
@register_to_config
def __init__( self , lowercase_ = 2_0_0_0 , lowercase_ = 0.15 , lowercase_ = 0.01 , lowercase_ = 1348.0 , lowercase_ = 1e-5 , lowercase_ = 1 , ) -> List[str]:
# standard deviation of the initial noise distribution
__snake_case = sigma_max
# setable values
__snake_case = None
self.set_sigmas(lowercase_ , lowercase_ , lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_ = None) -> torch.FloatTensor:
return sample
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None) -> List[Any]:
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__snake_case = torch.linspace(1 , lowercase_ , lowercase_ , device=lowercase_)
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None) -> List[Any]:
__snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
__snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
__snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase_ , lowercase_)
__snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__snake_case = torch.exp(torch.linspace(math.log(lowercase_) , math.log(lowercase_) , lowercase_))
__snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def _a ( self , lowercase_ , lowercase_) -> List[Any]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
__snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
__snake_case = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__snake_case = timesteps.to(self.discrete_sigmas.device)
__snake_case = self.discrete_sigmas[timesteps].to(sample.device)
__snake_case = self.get_adjacent_sigma(lowercase_ , lowercase_).to(sample.device)
__snake_case = torch.zeros_like(lowercase_)
__snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__snake_case = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
__snake_case = diffusion.unsqueeze(-1)
__snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=lowercase_ , device=sample.device , dtype=sample.dtype)
__snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase_ , prev_sample_mean=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
__snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=lowercase_).to(sample.device)
# compute step size from the model_output, the noise, and the snr
__snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
__snake_case = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
__snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__snake_case = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__snake_case = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
__snake_case = step_size.unsqueeze(-1)
__snake_case = sample + step_size * model_output
__snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case = timesteps.to(original_samples.device)
__snake_case = self.discrete_sigmas.to(original_samples.device)[timesteps]
__snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase_) * sigmas[:, None, None, None]
)
__snake_case = noise + original_samples
return noisy_samples
def __len__( self) -> Any:
return self.config.num_train_timesteps
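# Hedged sketch: the predictor-corrector sampling loop this scheduler is designed for, written
# against the public diffusers names (ScoreSdeVeScheduler); an outline, not the pipeline code.
# scheduler.set_timesteps(num_inference_steps); scheduler.set_sigmas(num_inference_steps)
# sample = randn_tensor(shape) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     for _ in range(n_correct_steps):  # corrector: annealed Langevin steps (step_correct)
#         sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#     out = scheduler.step_pred(model(sample, t).sample, t, sample)  # predictor: reverse-SDE step
#     sample = out.prev_sample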
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
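# Hedged usage sketch: the TrOCR decoder is typically paired with a vision encoder through
# VisionEncoderDecoderModel; the checkpoint id is illustrative and `image` is any PIL image.
# from transformers import TrOCRProcessor, VisionEncoderDecoderModel
# processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
# model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
# ids = model.generate(processor(images=image, return_tensors="pt").pixel_values)
# text = processor.batch_decode(ids, skip_special_tokens=True)[0]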
| 676 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase__ : Dict = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def A ( snake_case__ : Tuple ) -> str:
    '''Map the original SAM state-dict keys onto the Transformers naming scheme.'''
__snake_case = {}
state_dict.pop('pixel_mean' , snake_case__ )
state_dict.pop('pixel_std' , snake_case__ )
__snake_case = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case = key.replace(snake_case__ , snake_case__ )
if re.match(snake_case__ , snake_case__ ):
__snake_case = int(re.match(snake_case__ , snake_case__ ).group(2 ) )
if layer_nb == 0:
__snake_case = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
__snake_case = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
__snake_case = key.replace('layers.2' , 'proj_out' )
__snake_case = value
__snake_case = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def A ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple="ybelkada/segment-anything" ) -> Optional[int]:
    '''Convert a SAM checkpoint, sanity-check predictions on a test image, and optionally save or push it.'''
__snake_case = hf_hub_download(snake_case__ , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__snake_case = SamConfig()
elif "sam_vit_l" in model_name:
__snake_case = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__snake_case = SamConfig(
vision_config=snake_case__ , )
elif "sam_vit_h" in model_name:
__snake_case = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__snake_case = SamConfig(
vision_config=snake_case__ , )
__snake_case = torch.load(snake_case__ , map_location='cpu' )
__snake_case = replace_keys(snake_case__ )
__snake_case = SamImageProcessor()
__snake_case = SamProcessor(image_processor=snake_case__ )
__snake_case = SamModel(snake_case__ )
hf_model.load_state_dict(snake_case__ )
__snake_case = hf_model.to('cuda' )
__snake_case = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = [[[400, 650]]]
__snake_case = [[1]]
__snake_case = processor(images=np.array(snake_case__ ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__snake_case = hf_model(**snake_case__ )
__snake_case = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__snake_case = processor(
images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__snake_case = hf_model(**snake_case__ )
__snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__snake_case = ((75, 275, 1725, 850),)
__snake_case = processor(images=np.array(snake_case__ ) , input_boxes=snake_case__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__snake_case = hf_model(**snake_case__ )
__snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__snake_case = [[[400, 650], [800, 650]]]
__snake_case = [[1, 1]]
__snake_case = processor(
images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__snake_case = hf_model(**snake_case__ )
__snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
UpperCAmelCase__ : str = argparse.ArgumentParser()
UpperCAmelCase__ : Union[str, Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
UpperCAmelCase__ : Dict = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
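# Example invocation (script name is illustrative):
# python convert_sam_original_to_hf_format.py --model_name sam_vit_b_01ec64 \
#     --pytorch_dump_folder_path ./sam-vit-base --push_to_hub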
| 676 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
    '''Print each node's data via in-order traversal.'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
    '''Return the depth (height) of the tree; an empty tree has depth 0.'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
    '''Return True if every node has either zero or two children.'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
    '''Build a small example tree and exercise the helpers above.'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
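    # Minimal runnable sketch: the "full binary tree" property on a tiny standalone node type,
    # independent of the obfuscated class name above.
    class _N:
        def __init__(self, data: int, left: "_N | None" = None, right: "_N | None" = None) -> None:
            self.data, self.left, self.right = data, left, right

    def _full(t: "_N | None") -> bool:
        if t is None:
            return True
        if (t.left is None) != (t.right is None):
            return False  # exactly one child breaks the property
        return _full(t.left) and _full(t.right)

    print(_full(_N(1, _N(2), _N(3))))  # True
    print(_full(_N(1, _N(2))))         # False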
| 676 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=2 , lowercase_=9_9 , lowercase_=0 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_="last" , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_lengths
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = gelu_activation
__snake_case = sinusoidal_embeddings
__snake_case = causal
__snake_case = asm
__snake_case = n_langs
__snake_case = vocab_size
__snake_case = n_special
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = summary_type
__snake_case = use_proj
__snake_case = scope
def _a ( self) -> Dict:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_input_lengths:
__snake_case = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , 2).float()
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self) -> str:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = FlaubertModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , lengths=lowercase_ , langs=lowercase_)
__snake_case = model(lowercase_ , langs=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = FlaubertWithLMHeadModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Dict:
__snake_case = FlaubertForQuestionAnsweringSimple(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
__snake_case = FlaubertForQuestionAnswering(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , p_mask=lowercase_ , )
__snake_case = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , )
((__snake_case) , ) = result_with_labels.to_tuple()
__snake_case = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_)
((__snake_case) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[Any]:
__snake_case = FlaubertForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
__snake_case = self.num_labels
__snake_case = FlaubertForTokenClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[Any]:
__snake_case = self.num_choices
__snake_case = FlaubertForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _a ( self) -> Optional[int]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> str:
__snake_case = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
return inputs_dict
def _a ( self) -> Optional[int]:
__snake_case = FlaubertModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , emb_dim=3_7)
def _a ( self) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_)
def _a ( self) -> Tuple:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_)
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_)
@slow
def _a ( self) -> Optional[int]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = FlaubertModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@slow
@require_torch_gpu
def _a ( self) -> Optional[int]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__snake_case = True
__snake_case = model_class(config=lowercase_)
__snake_case = self._prepare_for_class(lowercase_ , lowercase_)
__snake_case = torch.jit.trace(
lowercase_ , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , 'traced_model.pt'))
__snake_case = torch.jit.load(os.path.join(lowercase_ , 'traced_model.pt') , map_location=lowercase_)
loaded(inputs_dict['input_ids'].to(lowercase_) , inputs_dict['attention_mask'].to(lowercase_))
@require_torch
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> List[str]:
__snake_case = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
__snake_case = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
with torch.no_grad():
__snake_case = model(lowercase_)[0]
__snake_case = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , lowercase_)
__snake_case = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
| 676 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
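

# Minimal usage sketch (an illustrative addition; it assumes the upstream `transformers`
# class name `TableTransformerConfig`, since the class identifiers in this file are
# placeholders):
if __name__ == "__main__":
    from transformers import TableTransformerConfig

    cfg = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
    # the attribute_map above makes the generic names resolve to the DETR-style ones
    assert cfg.hidden_size == 256
    assert cfg.num_attention_heads == 8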
| 676 | 1 |
import requests
UpperCAmelCase__ : Any = "" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ : List[Any] = "https://api.openweathermap.org/data/2.5/"
def A ( snake_case__ : str = "Chicago" , snake_case__ : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def A ( snake_case__ : str = "Kolkata, India" , snake_case__ : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def A ( snake_case__ : float = 55.68 , snake_case__ : float = 12.57 , snake_case__ : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
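

# Illustrative convenience wrapper (an addition, not part of the original script;
# the field names follow OpenWeatherMap's documented JSON response schema):
def summarize_current_weather(q: str, appid: str = APPID) -> str:
    report = current_weather(q, appid)
    description = report["weather"][0]["description"]
    kelvin = report["main"]["temp"]
    return f"{q}: {description}, {kelvin} K"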
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 676 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 when both number and number + 2 are prime, else -1
    (the pair then forms a twin prime).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
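    # Illustrative checks (an addition): 5 and 7 are both prime, so twin_prime(5) == 7,
    # while 9 is not prime, so twin_prime(9) == -1.
    assert twin_prime(5) == 7
    assert twin_prime(9) == -1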
| 676 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 676 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
@register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
def _a ( self , lowercase_ , lowercase_ = None) -> torch.FloatTensor:
return sample
def _a ( self , lowercase_ , lowercase_ = None) -> Tuple:
__snake_case = num_inference_steps
__snake_case = np.arange(0 , self.num_inference_steps)[::-1].copy()
__snake_case = torch.from_numpy(lowercase_).to(lowercase_)
__snake_case = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__snake_case = torch.tensor(lowercase_ , dtype=torch.floataa , device=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
__snake_case = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
else:
__snake_case = 0
# sample eps ~ N(0, S_noise^2 * I)
__snake_case = self.config.s_noise * randn_tensor(sample.shape , generator=lowercase_).to(sample.device)
__snake_case = sigma + gamma * sigma
__snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[KarrasVeOutput, Tuple]:
__snake_case = sample_hat + sigma_hat * model_output
__snake_case = (sample_hat - pred_original_sample) / sigma_hat
__snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase_ , derivative=lowercase_ , pred_original_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[KarrasVeOutput, Tuple]:
__snake_case = sample_prev + sigma_prev * model_output
__snake_case = (sample_prev - pred_original_sample) / sigma_prev
__snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase_ , derivative=lowercase_ , pred_original_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
raise NotImplementedError()
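

# Sketch of the intended stochastic sampling loop (an illustrative addition; the method
# names follow the upstream diffusers ``KarrasVeScheduler`` API -- ``set_timesteps``,
# ``add_noise_to_input``, ``step`` and ``step_correct`` -- for which the placeholder
# method names above stand in):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       # 1. temporarily increase the noise level (sigma -> sigma_hat)
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       # 2. Euler step from sigma_hat down to sigma_prev
#       model_output = model(sample_hat, sigma_hat).sample
#       output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       # 3. optional second-order correction
#       if sigma_prev != 0:
#           model_output = model(output.prev_sample, sigma_prev).sample
#           output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               output.prev_sample, output.derivative,
#           )
#       sample = output.prev_sample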
| 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    """Map a Pegasus TF variable name onto the matching torch state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        if any(pat in name for pat in ignore_name):
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
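    # Example invocation (illustrative; the script filename and checkpoint path are
    # assumptions matching the default used in `get_tf_weights_as_numpy` above):
    #
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc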
| 676 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> Union[str, Any]:
__snake_case = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class in get_values(lowercase_):
__snake_case = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
return inputs_dict
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=3_2 , lowercase_=2 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Tuple:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
__snake_case = embedding_size
def _a ( self) -> Optional[int]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
__snake_case = TFMobileBertModel(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
__snake_case = [input_ids, input_mask]
__snake_case = model(lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = TFMobileBertForMaskedLM(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> int:
__snake_case = TFMobileBertForNextSentencePrediction(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
__snake_case = TFMobileBertForPreTraining(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
__snake_case = self.num_labels
__snake_case = TFMobileBertForSequenceClassification(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = self.num_choices
__snake_case = TFMobileBertForMultipleChoice(config=lowercase_)
__snake_case = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
__snake_case = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
__snake_case = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
__snake_case = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Tuple:
__snake_case = self.num_labels
__snake_case = TFMobileBertForTokenClassification(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
__snake_case = TFMobileBertForQuestionAnswering(config=lowercase_)
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _a ( self) -> Tuple:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def _a ( self) -> str:
__snake_case = TFMobileBertModelTest.TFMobileBertModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> List[str]:
self.config_tester.run_common_tests()
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase_)
def _a ( self) -> List[str]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_)
def _a ( self) -> str:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_)
def _a ( self) -> Tuple:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_)
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_)
def _a ( self) -> List[str]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_)
@slow
def _a ( self) -> Optional[Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__snake_case = TFMobileBertModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> Tuple:
__snake_case = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
__snake_case = tf.constant([[0, 1, 2, 3, 4, 5]])
__snake_case = model(lowercase_)[0]
__snake_case = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , lowercase_)
__snake_case = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4)
| 676 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
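

# Usage sketch (an illustrative addition; the checkpoint id and the ``custom_pipeline``
# identifier are assumptions -- community pipelines like this one are conventionally
# loaded through ``DiffusionPipeline.from_pretrained`` -- so treat it as pseudocode for
# how ``__call__`` above is wired):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion"
#   )
#   output = pipe(raw_audio, 16_000, num_inference_steps=50, guidance_scale=7.5)
#   output.images[0].save("speech_to_image.png")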
| 676 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = "Hello, World!"
UpperCAmelCase__ : Optional[Any] = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
) -> None:
    data_dir = Path('data_bin')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name='xmod_base', arch='xmod_base', task='multilingual_masked_lm', data_name_or_path=str(data_dir), bpe='sentencepiece', sentencepiece_model=str(Path(xmod_checkpoint_path).parent / 'sentencepiece.bpe.model'), src_dict=str(data_dir / 'dict.txt'), )
    xmod.eval()  # disable dropout
    print(xmod)
__snake_case = xmod.model.encoder.sentence_encoder
__snake_case = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__snake_case = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , snake_case__ )
__snake_case = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case = xmod_sent_encoder.embed_tokens.weight
__snake_case = xmod_sent_encoder.embed_positions.weight
__snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__snake_case = xmod_sent_encoder.layernorm_embedding.weight
__snake_case = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__snake_case = model.roberta.encoder.layer[i]
__snake_case = xmod_sent_encoder.layers[i]
# self attention
__snake_case = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__snake_case = xmod_layer.self_attn.q_proj.weight
__snake_case = xmod_layer.self_attn.q_proj.bias
__snake_case = xmod_layer.self_attn.k_proj.weight
__snake_case = xmod_layer.self_attn.k_proj.bias
__snake_case = xmod_layer.self_attn.v_proj.weight
__snake_case = xmod_layer.self_attn.v_proj.bias
# self-attention output
__snake_case = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__snake_case = xmod_layer.self_attn.out_proj.weight
__snake_case = xmod_layer.self_attn.out_proj.bias
__snake_case = xmod_layer.self_attn_layer_norm.weight
__snake_case = xmod_layer.self_attn_layer_norm.bias
# intermediate
__snake_case = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__snake_case = xmod_layer.fca.weight
__snake_case = xmod_layer.fca.bias
# output
__snake_case = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__snake_case = xmod_layer.fca.weight
__snake_case = xmod_layer.fca.bias
__snake_case = xmod_layer.final_layer_norm.weight
__snake_case = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__snake_case = xmod_layer.adapter_layer_norm.weight
__snake_case = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__snake_case = bert_output.adapter_modules[lang_code]
__snake_case = xmod_layer.adapter_modules[lang_code]
__snake_case = from_adapter.fca.weight
__snake_case = from_adapter.fca.bias
__snake_case = from_adapter.fca.weight
__snake_case = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__snake_case = xmod_sent_encoder.layer_norm.weight
__snake_case = xmod_sent_encoder.layer_norm.bias
if classification_head:
__snake_case = xmod.model.classification_heads['mnli'].dense.weight
__snake_case = xmod.model.classification_heads['mnli'].dense.bias
__snake_case = xmod.model.classification_heads['mnli'].out_proj.weight
__snake_case = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__snake_case = xmod.model.encoder.lm_head.dense.weight
__snake_case = xmod.model.encoder.lm_head.dense.bias
__snake_case = xmod.model.encoder.lm_head.layer_norm.weight
__snake_case = xmod.model.encoder.lm_head.layer_norm.bias
__snake_case = xmod.model.encoder.lm_head.weight
__snake_case = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
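    # Example invocation (illustrative; the script filename and paths are placeholders):
    #
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path ./xmod.base/model.pt \
    #       --pytorch_dump_folder_path ./xmod-base \
    #       --classification_head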
| 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
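        # shifting the start time forward excludes one-time JIT/XLA compilation from the reported eval runtime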
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Dict = logging.get_logger()
def A ( snake_case__ : int , snake_case__ : str , snake_case__ : LevitConfig , snake_case__ : Path , snake_case__ : bool = True ) -> Union[str, Any]:
    '''Convert a timm LeViT checkpoint into a LevitForImageClassificationWithTeacher model and verify that the logits match.'''
print(f"Converting {name}..." )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__snake_case = timm.create_model('levit_128s' , pretrained=snake_case__ )
else:
__snake_case = timm.create_model('levit_128' , pretrained=snake_case__ )
if hidden_sizes == 192:
__snake_case = timm.create_model('levit_192' , pretrained=snake_case__ )
if hidden_sizes == 256:
__snake_case = timm.create_model('levit_256' , pretrained=snake_case__ )
if hidden_sizes == 384:
__snake_case = timm.create_model('levit_384' , pretrained=snake_case__ )
from_model.eval()
__snake_case = LevitForImageClassificationWithTeacher(snake_case__ ).eval()
__snake_case = OrderedDict()
__snake_case = from_model.state_dict()
__snake_case = list(from_model.state_dict().keys() )
__snake_case = list(our_model.state_dict().keys() )
print(len(snake_case__ ) , len(snake_case__ ) )
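    # NOTE: the copy below relies on both state dicts enumerating their parameters in the same order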
for i in range(len(snake_case__ ) ):
__snake_case = weights[og_keys[i]]
our_model.load_state_dict(snake_case__ )
__snake_case = torch.randn((2, 3, 224, 224) )
__snake_case = from_model(snake_case__ )
__snake_case = our_model(snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ ), "The model logits don't match the original one."
__snake_case = name
print(snake_case__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__snake_case = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"Pushed {checkpoint_name}" )
def A ( snake_case__ : Path , snake_case__ : str = None , snake_case__ : bool = True ) -> List[Any]:
    '''Build the LeViT configs, then convert and (optionally) push one named checkpoint or all of them.'''
__snake_case = 'imagenet-1k-id2label.json'
__snake_case = 1000
__snake_case = (1, num_labels)
__snake_case = 'huggingface/label-files'
__snake_case = num_labels
__snake_case = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
__snake_case = {int(snake_case__ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
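    # NOTE: the partial above appears to be the ImageNetPreTrainedConfig used below, i.e. the config class pre-filled with the ImageNet label mappings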
__snake_case = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
__snake_case = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
UpperCAmelCase__ : List[str] = parser.parse_args()
UpperCAmelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 676 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    '''A*-style grid search: returns the path from init to goal (as [x, y] cells) plus the action grid used to reconstruct it.'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid of already-expanded cells
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            # mark the cell as expanded and remember which move reached it
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCAmelCase__ : Union[str, Any] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A ( snake_case__ : Union[str, Any] ) -> Any:
    '''Parse a pytest summary string into the number of failed tests, the number of passed tests, and the time spent.'''
__snake_case = test_results.split(' ' )
__snake_case = 0
__snake_case = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__snake_case = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(snake_case__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
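# e.g. "1 failed, 2 passed in 1.23s" -> failed=1, success=2, time_spent="1.23s"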
def A ( snake_case__ : Optional[int] ) -> Any:
    '''From a pytest `failures_short` report, map each failing doctest to the first line of its error message.'''
__snake_case = {}
__snake_case = None
__snake_case = False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]' , snake_case__ ):
__snake_case = True
__snake_case = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
__snake_case = line
__snake_case = False
return failures
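# each failure block starts with a "_ [doctest] <name> _" header; the first following line that does not start with a digit is kept as the error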
class __lowercase :
def __init__( self , lowercase_ , lowercase_) -> int:
__snake_case = title
__snake_case = doc_test_results['time_spent'].split(',')[0]
__snake_case = doc_test_results['success']
__snake_case = doc_test_results['failures']
__snake_case = self.n_success + self.n_failures
# Failures and success of the modeling tests
__snake_case = doc_test_results
@property
def _a ( self) -> str:
__snake_case = [self._time_spent]
__snake_case = 0
for time in time_spent:
__snake_case = time.split(':')
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowercase_) == 1:
__snake_case = [0, 0, time_parts[0]]
__snake_case , __snake_case , __snake_case = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
__snake_case , __snake_case , __snake_case = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return F"{int(lowercase_)}h{int(lowercase_)}m{int(lowercase_)}s"
@property
def _a ( self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _a ( self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _a ( self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _a ( self) -> Dict:
__snake_case = 4_0
__snake_case = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(lowercase_ , lowercase_)}
__snake_case = ''
for category, failures in category_failures.items():
if len(lowercase_) == 0:
continue
if report != "":
report += "\n\n"
report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(lowercase_)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _a ( self) -> str:
__snake_case = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(lowercase_)
@staticmethod
def _a ( ) -> List[str]:
__snake_case = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(lowercase_)}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=lowercase_ , )
def _a ( self) -> List[Any]:
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(self.payload)}))
__snake_case = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.'
__snake_case = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
__snake_case = ''
for key, value in failures.items():
__snake_case = value[:2_0_0] + ' [Truncated]' if len(lowercase_) > 2_5_0 else value
failures_text += F"*{key}*\n_{value}_\n\n"
__snake_case = job_name
__snake_case = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
__snake_case = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _a ( self) -> Tuple:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.')
__snake_case = self.doc_test_results.pop('job_link')
self.doc_test_results.pop('failures')
self.doc_test_results.pop('success')
self.doc_test_results.pop('time_spent')
        __snake_case = sorted(self.doc_test_results.items() , key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result['failures']):
__snake_case = F"*Num failures* :{len(job_result['failed'])} \n"
__snake_case = job_result['failures']
__snake_case = self.get_reply_blocks(lowercase_ , lowercase_ , lowercase_ , text=lowercase_)
print('Sending the following reply')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F"Results for {job}" , blocks=lowercase_ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1)
def A ( ) -> Dict:
    '''Fetch the mapping from GitHub Actions job names to their URLs for the current run.'''
__snake_case = os.environ['GITHUB_RUN_ID']
__snake_case = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__snake_case = requests.get(snake_case__ ).json()
__snake_case = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
__snake_case = math.ceil((result['total_count'] - 100) / 100 )
for i in range(snake_case__ ):
__snake_case = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , snake_case__ )
return {}
def A ( snake_case__ : str ) -> Tuple:
    '''Read the files of a downloaded artifact directory into memory.'''
__snake_case = {}
if os.path.exists(snake_case__ ):
__snake_case = os.listdir(snake_case__ )
for file in files:
try:
with open(os.path.join(snake_case__ , snake_case__ ) , encoding='utf-8' ) as f:
__snake_case = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(snake_case__ , snake_case__ )}." ) from e
return _artifact
def A ( ) -> List[Any]:
    '''Discover the artifact directories available in the current working directory.'''
class __lowercase :
def __init__( self , lowercase_) -> int:
__snake_case = name
__snake_case = []
def __str__( self) -> List[Any]:
return self.name
def _a ( self , lowercase_) -> int:
self.paths.append({'name': self.name, 'path': path})
__snake_case = {}
__snake_case = filter(os.path.isdir , os.listdir() )
for directory in directories:
__snake_case = directory
if artifact_name not in _available_artifacts:
__snake_case = Artifact(snake_case__ )
_available_artifacts[artifact_name].add_path(snake_case__ )
return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase__ : int = get_job_links()
UpperCAmelCase__ : Tuple = retrieve_available_artifacts()
UpperCAmelCase__ : List[Any] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase__ : str = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase__ : str = github_actions_job_links.get("run_doctests")
UpperCAmelCase__ : Optional[int] = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
UpperCAmelCase__ : Any = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = handle_test_results(artifact["stats"])
UpperCAmelCase__ : Tuple = failed
UpperCAmelCase__ : str = success
UpperCAmelCase__ : Optional[Any] = time_spent[1:-1] + ", "
UpperCAmelCase__ : int = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
UpperCAmelCase__ : Union[str, Any] = line.replace("FAILED ", "")
UpperCAmelCase__ : Optional[int] = line.split()[0].replace("\n", "")
if "::" in line:
UpperCAmelCase__ , UpperCAmelCase__ : Any = line.split("::")
else:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase__ : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase__ : Any = all_failures[test] if test in all_failures else "N/A"
UpperCAmelCase__ : Tuple = failure
break
UpperCAmelCase__ : Dict = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "spiece.model"}
UpperCAmelCase__ : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowercase ( PreTrainedTokenizer ):
def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<sep>" , lowercase_="<pad>" , lowercase_="<cls>" , lowercase_="<mask>" , lowercase_=["<eop>", "<eod>"] , lowercase_ = None , **lowercase_ , ) -> None:
__snake_case = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
__snake_case = 3
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.')
__snake_case = jieba
__snake_case = str.maketrans(' \n' , '\u2582\u2583')
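        # spaces and newlines are translated to \u2582/\u2583 so they survive tokenization; _decode below maps them back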
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _a ( self) -> Dict:
return len(self.sp_model)
def _a ( self) -> List[str]:
__snake_case = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Dict:
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self , lowercase_) -> Dict:
__snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _a ( self , lowercase_) -> List[str]:
if self.remove_space:
__snake_case = ' '.join(inputs.strip().split())
else:
__snake_case = inputs
__snake_case = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
__snake_case = unicodedata.normalize('NFKD' , lowercase_)
__snake_case = ''.join([c for c in outputs if not unicodedata.combining(lowercase_)])
if self.do_lower_case:
__snake_case = outputs.lower()
return outputs
def _a ( self , lowercase_) -> List[str]:
__snake_case = self.preprocess_text(lowercase_)
__snake_case = self.sp_model.encode(lowercase_ , out_type=lowercase_)
__snake_case = []
for piece in pieces:
if len(lowercase_) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_ , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__snake_case = cur_pieces[1:]
else:
__snake_case = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowercase_)
else:
new_pieces.append(lowercase_)
return new_pieces
def _a ( self , lowercase_) -> Optional[Any]:
return self.sp_model.PieceToId(lowercase_)
def _a ( self , lowercase_) -> Any:
return self.sp_model.IdToPiece(lowercase_)
def _a ( self , lowercase_) -> Dict:
__snake_case = ''.join(lowercase_).replace(lowercase_ , ' ').strip()
return out_string
def _a ( self , lowercase_ , lowercase_ = None) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is not None:
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1, 1]
return ([0] * len(lowercase_)) + [1, 1]
def _a ( self , lowercase_ , lowercase_ = None) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _a ( self , lowercase_ , lowercase_ = None) -> Tuple[str]:
if not os.path.isdir(lowercase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , 'wb') as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
def _a ( self , *lowercase_ , **lowercase_) -> Optional[Any]:
__snake_case = super()._decode(*lowercase_ , **lowercase_)
__snake_case = text.replace(' ' , '').replace('\u2582' , ' ').replace('\u2583' , '\n')
return text
| 676 |
def combination_sum_iv(n : int , array : list[int] , target : int) -> int:
    '''Count the ordered combinations (permutations) of elements of `array` that sum to `target`, via naive recursion.'''
    def count_of_possible_combinations(target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
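# The naive recursion above runs in exponential time; the memoized and bottom-up
# variants below bring the cost down to O(n * target).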
def combination_sum_iv_dp_array(n : int , array : list[int] , target : int) -> int:
    '''Same count, computed top-down with memoization over the remaining target.'''
    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n : int , array : list[int] , target : int) -> int:
    '''Same count, computed bottom-up in O(n * target) time and O(target) space.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
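# Bottom-up recurrence: dp_array[i] = sum(dp_array[i - a] for a in array if i >= a), with dp_array[0] = 1.
# e.g. array = [1, 2, 5] and target = 5 gives dp_array = [1, 1, 2, 3, 5, 9], i.e. 9 ordered combinations.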
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def A ( snake_case__ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
    '''Return the shape of `tensor` as a list, preferring static dimensions and falling back to dynamic ones.'''
if isinstance(snake_case__ , np.ndarray ):
return list(tensor.shape )
__snake_case = tf.shape(snake_case__ )
if tensor.shape == tf.TensorShape(snake_case__ ):
return dynamic
__snake_case = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(snake_case__ )]
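# e.g. for a tensor with static shape (None, 128) this returns [<dynamic batch-size tensor>, 128]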
def A ( snake_case__ : tf.Tensor , snake_case__ : Optional[int] = None , snake_case__ : Optional[str] = None ) -> tf.Tensor:
    '''Stable wrapper around tf.nn.softmax: a tiny epsilon (1e-9) is added to the logits so the op is not mis-optimized under XLA on some platforms.'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=snake_case__ , name=snake_case__ )
def A ( snake_case__ : Any , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=1e-5 , snake_case__ : List[str]=-1 ) -> List[Any]:
    '''Functional layer normalization over `axis`, mirroring torch.nn.functional.layer_norm (see the comment below).'''
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case__ , snake_case__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
__snake_case , __snake_case = tf.nn.moments(snake_case__ , axes=[axis] , keepdims=snake_case__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__snake_case = [1] * inputs.shape.rank
__snake_case = shape_list(snake_case__ )[axis]
__snake_case = tf.reshape(snake_case__ , snake_case__ )
__snake_case = tf.reshape(snake_case__ , snake_case__ )
# Compute layer normalization using the batch_normalization
# function.
__snake_case = tf.nn.batch_normalization(
snake_case__ , snake_case__ , snake_case__ , offset=snake_case__ , scale=snake_case__ , variance_epsilon=snake_case__ , )
return outputs
def A ( snake_case__ : Any , snake_case__ : str=0 , snake_case__ : str=-1 ) -> Optional[int]:
    '''Flatten `input` between `start_dim` and `end_dim`, replicating the behavior of torch.flatten.'''
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__snake_case = tf.shape(snake_case__ )
__snake_case = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__snake_case = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(snake_case__ , snake_case__ )
def A ( snake_case__ : tf.Tensor ) -> tf.Tensor:
    '''Turn a [batch, seq] or [batch, from_seq, to_seq] encoder attention mask into an additive 4D mask for cross-attention.'''
if not isinstance(snake_case__ , tf.Tensor ):
__snake_case = tf.convert_to_tensor(snake_case__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__snake_case = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__snake_case = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__snake_case = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
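# the returned mask is additive: 0 where attention is allowed, dtype.min where it is masked out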
def A ( snake_case__ : tf.Tensor , snake_case__ : int , snake_case__ : str = "input_ids" ) -> None:
    '''Assert that every value in `tensor` is smaller than `embed_dim`, i.e. a valid index into the embedding layer.'''
tf.debugging.assert_less(
snake_case__ , tf.cast(snake_case__ , dtype=tensor.dtype ) , message=(
f"The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case__ )}) must be smaller than the embedding "
f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : str ) -> Tuple:
    '''Save `data` as HDF5 attributes of `group`, chunking any attribute that would exceed the ~64 KiB object-header limit.'''
__snake_case = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__snake_case = [x for x in data if len(snake_case__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}" )
__snake_case = np.asarray(snake_case__ )
__snake_case = 1
__snake_case = np.array_split(snake_case__ , snake_case__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__snake_case = np.array_split(snake_case__ , snake_case__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(snake_case__ ):
__snake_case = chunk_data
else:
__snake_case = data
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] ) -> Optional[Any]:
    '''Load HDF5 attributes saved by the function above, re-assembling chunked attributes.'''
if name in group.attrs:
__snake_case = [n.decode('utf8' ) if hasattr(snake_case__ , 'decode' ) else n for n in group.attrs[name]]
else:
__snake_case = []
__snake_case = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(snake_case__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
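# chunked attributes were stored as "name0", "name1", ... and are re-assembled here in order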
def A ( snake_case__ : Optional[int] ) -> List[Any]:
    '''Expand every rank-1 tf.Tensor in a (possibly nested) structure to rank 2 by adding a trailing axis.'''
    def _expand_single_ad_tensor(t : List[Any] ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , snake_case__ )
| 676 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( TestCase ):
def _a ( self) -> List[str]:
        __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=True , keep_in_memory=True)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( TestCase ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
    '''Round-trip a FAISS index through a mocked fsspec filesystem (the `mockfs` fixture).'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( TestCase ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowercase :
def __init__( self , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = question_encoder
__snake_case = generator
__snake_case = self.question_encoder
def _a ( self , lowercase_) -> List[str]:
if os.path.isfile(lowercase_):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(lowercase_ , exist_ok=lowercase_)
__snake_case = os.path.join(lowercase_ , 'question_encoder_tokenizer')
__snake_case = os.path.join(lowercase_ , 'generator_tokenizer')
self.question_encoder.save_pretrained(lowercase_)
self.generator.save_pretrained(lowercase_)
@classmethod
def _a ( cls , lowercase_ , **lowercase_) -> List[Any]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__snake_case = kwargs.pop('config' , lowercase_)
if config is None:
__snake_case = RagConfig.from_pretrained(lowercase_)
__snake_case = AutoTokenizer.from_pretrained(
lowercase_ , config=config.question_encoder , subfolder='question_encoder_tokenizer')
__snake_case = AutoTokenizer.from_pretrained(
lowercase_ , config=config.generator , subfolder='generator_tokenizer')
return cls(question_encoder=lowercase_ , generator=lowercase_)
def __call__( self , *lowercase_ , **lowercase_) -> Tuple:
return self.current_tokenizer(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> str:
return self.generator.batch_decode(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> List[str]:
return self.generator.decode(*lowercase_ , **lowercase_)
def _a ( self) -> int:
__snake_case = self.question_encoder
def _a ( self) -> Optional[int]:
__snake_case = self.generator
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = "longest" , lowercase_ = None , lowercase_ = True , **lowercase_ , ) -> BatchEncoding:
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , lowercase_ , )
if max_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , )
__snake_case = labels['input_ids']
return model_inputs
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result : Dataset , args : Dict[str, str] ) -> None:
    '''Compute WER and CER for the predictions and write the results (and optionally all outputs) to text files.'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"{i}" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f"{i}" + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text : str ) -> str:
    '''Lower-case the transcript and strip the characters that were ignored during training.'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
def main(args ) -> None:
    '''Load the dataset and model, run inference on every example, and log the results.'''
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: uncomment the next line to only process the first few examples
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
| 676 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 676 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ) -> None:
    '''solves the multi-process interleaved print problem by locking this file around print'''
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
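# On a healthy setup every rank prints an OK line via printflock before rank 0 reports the
# library versions; e.g. for 2 GPUs on one node the output looks like (values illustrative):
#
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)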
| 676 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
    '''Solve for whichever of inductance (H), frequency (Hz) or inductive reactance (ohm) is passed as 0.'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
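# Worked example (illustrative values): solving for the inductance that gives an inductive
# reactance of 50 ohm at 1000 Hz returns reactance / (2 * pi * frequency):
#   ind_reactance(0, 1000, 50)  ->  {'inductance': 0.00795...}  (= 50 / (2 * pi * 1000))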
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
from datetime import datetime
import requests
def download_video(url : str ) -> bytes:
    '''Return the raw bytes of the video behind an Instagram video/IGTV url.'''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str , bert_config_file : str , pytorch_dump_path : str ) -> None:
    '''Convert a TensorFlow BERT checkpoint into a PyTorch BertForPreTraining state dict.'''
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
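# Example command line (the script filename and the checkpoint/config paths below are
# placeholders; the flags match the parser defined above):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin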
| 676 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=3_7)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model( self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label( self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label( self):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling( self , scaling_type):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5))
| 676 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def _a ( self) -> List[Any]:
super().setUp()
__snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = 'こんにちは、世界。 \nこんばんは、世界。'
__snake_case = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def _a ( self , lowercase_) -> Dict:
__snake_case , __snake_case = self.get_input_output_texts(lowercase_)
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
__snake_case = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_)
return text, ids
def _a ( self) -> Optional[int]:
pass # TODO add if relevant
def _a ( self) -> List[str]:
pass # TODO add if relevant
def _a ( self) -> Optional[Any]:
pass # TODO add if relevant
def _a ( self) -> int:
__snake_case = self.tokenizer_class(self.vocab_file)
__snake_case = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
self.assertListEqual(lowercase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def _a ( self) -> List[str]:
__snake_case = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab')
self.assertIsNotNone(lowercase_)
__snake_case = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case = tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__snake_case = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(lowercase_ , 'wb') as handle:
pickle.dump(lowercase_ , lowercase_)
with open(lowercase_ , 'rb') as handle:
__snake_case = pickle.load(lowercase_)
__snake_case = tokenizer_new.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _a ( self) -> List[str]:
__snake_case = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _a ( self) -> List[Any]:
try:
__snake_case = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _a ( self) -> Any:
try:
__snake_case = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _a ( self) -> Union[str, Any]:
__snake_case = MecabTokenizer(do_lower_case=lowercase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _a ( self) -> int:
try:
__snake_case = MecabTokenizer(
do_lower_case=lowercase_ , normalize_text=lowercase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def _a ( self) -> Any:
__snake_case = MecabTokenizer(normalize_text=lowercase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def _a ( self) -> List[str]:
__snake_case = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi')
self.assertIsNotNone(lowercase_)
__snake_case = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case = tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__snake_case = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(lowercase_ , 'wb') as handle:
pickle.dump(lowercase_ , lowercase_)
with open(lowercase_ , 'rb') as handle:
__snake_case = pickle.load(lowercase_)
__snake_case = tokenizer_new.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
@require_sudachi
def _a ( self) -> Optional[Any]:
__snake_case = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _a ( self) -> Tuple:
__snake_case = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国', '人', '参政', '権'])
@require_sudachi
def _a ( self) -> Any:
__snake_case = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人', '参政権'])
@require_sudachi
def _a ( self) -> Optional[int]:
__snake_case = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人参政権'])
@require_sudachi
def _a ( self) -> Union[str, Any]:
__snake_case = SudachiTokenizer(do_lower_case=lowercase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _a ( self) -> List[Any]:
__snake_case = SudachiTokenizer(normalize_text=lowercase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def _a ( self) -> str:
__snake_case = SudachiTokenizer(trim_whitespace=lowercase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def _a ( self) -> List[Any]:
__snake_case = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp')
self.assertIsNotNone(lowercase_)
__snake_case = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case = tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__snake_case = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(lowercase_ , 'wb') as handle:
pickle.dump(lowercase_ , lowercase_)
with open(lowercase_ , 'rb') as handle:
__snake_case = pickle.load(lowercase_)
__snake_case = tokenizer_new.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
@require_jumanpp
def _a ( self) -> Any:
__snake_case = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _a ( self) -> List[str]:
__snake_case = JumanppTokenizer(do_lower_case=lowercase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _a ( self) -> int:
__snake_case = JumanppTokenizer(normalize_text=lowercase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _a ( self) -> List[str]:
__snake_case = JumanppTokenizer(trim_whitespace=lowercase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def _a ( self) -> int:
__snake_case = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。') , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def _a ( self) -> int:
__snake_case = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__snake_case = {}
for i, token in enumerate(lowercase_):
__snake_case = i
__snake_case = WordpieceTokenizer(vocab=lowercase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こんにちは'])
self.assertListEqual(tokenizer.tokenize('こんばんは') , ['こん', '##ばんは'])
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは') , ['こん', '##ばんは', '[UNK]', 'こんにちは'])
def _a ( self) -> Optional[int]:
__snake_case = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
__snake_case = tokenizer.subword_tokenizer
__snake_case = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
self.assertListEqual(lowercase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])
__snake_case = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
self.assertListEqual(lowercase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
    def test_sequence_builders( self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False)
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def _a ( self) -> Optional[int]:
super().setUp()
__snake_case = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def _a ( self , **lowercase_) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowercase_)
def _a ( self , lowercase_) -> Any:
__snake_case = 'こんにちは、世界。 \nこんばんは、世界。'
__snake_case = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def _a ( self) -> List[Any]:
pass # TODO add if relevant
def _a ( self) -> Dict:
pass # TODO add if relevant
def _a ( self) -> List[str]:
pass # TODO add if relevant
def _a ( self) -> str:
__snake_case = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character')
__snake_case = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
self.assertListEqual(
lowercase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def _a ( self) -> Optional[Any]:
__snake_case = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__snake_case = {}
for i, token in enumerate(lowercase_):
__snake_case = i
__snake_case = CharacterTokenizer(vocab=lowercase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こ', 'ん', 'に', 'ち', 'は'])
self.assertListEqual(tokenizer.tokenize('こんにちほ') , ['こ', 'ん', 'に', 'ち', '[UNK]'])
    def test_sequence_builders( self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False)
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Union[str, Any]:
__snake_case = 'cl-tohoku/bert-base-japanese'
__snake_case = AutoTokenizer.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Dict:
__snake_case = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertTokenizer.from_pretrained(lowercase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
__snake_case = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertJapaneseTokenizer.from_pretrained(lowercase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
| 676 |
def is_automorphic_number(number : int ) -> bool:
    '''Return True if the square of `number` ends in `number` itself (e.g. 5 -> 25, 76 -> 5776).'''
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
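# Quick sanity checks: 5**2 = 25 and 76**2 = 5776 end in 5 and 76 respectively, while
# 7**2 = 49 does not end in 7:
#   is_automorphic_number(5)   ->  True
#   is_automorphic_number(76)  ->  True
#   is_automorphic_number(7)   ->  False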
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# TODO Update this
UpperCAmelCase__ : str = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''esm'''
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0_2_6 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_="absolute" , lowercase_=True , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = emb_layer_norm_before
__snake_case = token_dropout
__snake_case = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.')
__snake_case = EsmFoldConfig()
elif isinstance(lowercase_ , lowercase_):
__snake_case = EsmFoldConfig(**lowercase_)
__snake_case = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
__snake_case = get_default_vocab_list()
else:
__snake_case = vocab_list
else:
__snake_case = None
__snake_case = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowercase_):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
def _a ( self) -> List[Any]:
__snake_case = super().to_dict()
if isinstance(self.esmfold_config , lowercase_):
__snake_case = self.esmfold_config.to_dict()
return output
@dataclass
class __lowercase :
__UpperCAmelCase = None
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = 128
__UpperCAmelCase = None
def _a ( self) -> int:
if self.trunk is None:
__snake_case = TrunkConfig()
elif isinstance(self.trunk , lowercase_):
__snake_case = TrunkConfig(**self.trunk)
def _a ( self) -> Optional[Any]:
__snake_case = asdict(self)
__snake_case = self.trunk.to_dict()
return output
@dataclass
class __lowercase :
__UpperCAmelCase = 48
__UpperCAmelCase = 1_024
__UpperCAmelCase = 128
__UpperCAmelCase = 32
__UpperCAmelCase = 32
__UpperCAmelCase = 32
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = False
__UpperCAmelCase = 4
__UpperCAmelCase = 128
__UpperCAmelCase = None
def _a ( self) -> int:
if self.structure_module is None:
__snake_case = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase_):
__snake_case = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}.")
def _a ( self) -> str:
__snake_case = asdict(self)
__snake_case = self.structure_module.to_dict()
return output
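# Illustrative numbers for the consistency checks above: with the default sequence_state_dim = 1024
# and sequence_head_width = 32 there are 1024 // 32 = 32 sequence heads, and 32 * 32 == 1024, so
# validation passes; sequence_state_dim = 1000 would fail the round-multiple check (1000 % 32 != 0).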
@dataclass
class __lowercase :
__UpperCAmelCase = 384
__UpperCAmelCase = 128
__UpperCAmelCase = 16
__UpperCAmelCase = 128
__UpperCAmelCase = 12
__UpperCAmelCase = 4
__UpperCAmelCase = 8
__UpperCAmelCase = 0.1
__UpperCAmelCase = 8
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 7
__UpperCAmelCase = 10
__UpperCAmelCase = 1e-8
__UpperCAmelCase = 1e5
def _a ( self) -> Dict:
return asdict(self)
def A ( ) -> Any:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 676 |
import numpy as np
def sigmoid(vector : np.ndarray ) -> np.ndarray:
    '''Elementwise logistic sigmoid: 1 / (1 + exp(-x)).'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector : np.ndarray ) -> np.ndarray:
    '''Sigmoid linear unit (SiLU / swish): x * sigmoid(x).'''
    return vector * sigmoid(vector )
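# Reference values for the SiLU/swish activation above: since sigmoid(0) = 0.5, silu(0) is
# exactly 0, and sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])) ~= [-0.2689, 0.0, 0.7311].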
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ):
    '''Reset the deduplication set so every test sees the deprecation warning again.'''
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch ):
    '''Stub out the huggingface_hub client with a fake metric listing.'''
    class MetricMock:
        def __init__( self , metric_id):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
        def list_metrics( self):
            return self._metrics
    monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
    'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    '''Each metrics entry point should emit the FutureWarning pointing users to evaluate.'''
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='https://huggingface.co/docs/evaluate' ):
        func(*args )
| 676 |
def lucas_lehmer_test(p : int ) -> bool:
    '''Lucas-Lehmer primality test: for an odd prime p, the Mersenne number 2**p - 1 is prime iff s == 0 at the end.'''
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
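# The prints above report True then False: 2**7 - 1 = 127 is prime, while 2**11 - 1 = 2047
# factors as 23 * 89. The test iterates s_0 = 4, s_k = (s_{k-1}**2 - 2) mod (2**p - 1) and
# declares 2**p - 1 prime exactly when s_{p-2} == 0.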
| 676 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''input_features''', '''attention_mask''']
def __init__( self , lowercase_=8_0 , lowercase_=1_6_0_0_0 , lowercase_=8_0 , lowercase_=0.0 , lowercase_=True , lowercase_=True , lowercase_=True , **lowercase_ , ) -> Optional[int]:
super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_)
__snake_case = num_mel_bins
__snake_case = do_ceptral_normalize
__snake_case = normalize_means
__snake_case = normalize_vars
__snake_case = True
def _a ( self , lowercase_ , ) -> np.ndarray:
__snake_case = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
__snake_case = torch.from_numpy(lowercase_).unsqueeze(0)
__snake_case = ta_kaldi.fbank(lowercase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def _a ( lowercase_ , lowercase_ , lowercase_ = True , lowercase_ = True , lowercase_ = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
__snake_case = x[:input_length].mean(axis=0)
__snake_case = np.subtract(lowercase_ , lowercase_)
if normalize_vars:
__snake_case = x[:input_length].std(axis=0)
__snake_case = np.divide(lowercase_ , lowercase_)
if input_length < x.shape[0]:
__snake_case = padding_value
# make sure array is in float32
__snake_case = x.astype(np.floataa)
return x
def _a ( self , lowercase_ , lowercase_ = None) -> List[np.ndarray]:
__snake_case = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowercase_ , lowercase_ , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(lowercase_ , lowercase_)
]
def __call__( self , lowercase_ , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
__snake_case = isinstance(lowercase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__snake_case = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__snake_case = [np.asarray(lowercase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray):
__snake_case = np.asarray(lowercase_ , dtype=np.floataa)
elif isinstance(lowercase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__snake_case = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__snake_case = [raw_speech]
# extract fbank features
__snake_case = [self._extract_fbank_features(lowercase_) for waveform in raw_speech]
# convert into correct format for padding
__snake_case = BatchFeature({'input_features': features})
__snake_case = self.pad(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
# make sure list is in array format
__snake_case = padded_inputs.get('input_features')
if isinstance(input_features[0] , lowercase_):
__snake_case = [np.asarray(lowercase_ , dtype=np.floataa) for feature in input_features]
__snake_case = padded_inputs.get('attention_mask')
if attention_mask is not None:
__snake_case = [np.asarray(lowercase_ , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__snake_case = (
np.array(lowercase_ , dtype=np.intaa)
if self._get_padding_strategies(lowercase_ , max_length=lowercase_) is not PaddingStrategy.DO_NOT_PAD
else None
)
__snake_case = self.normalize(
padded_inputs['input_features'] , attention_mask=lowercase_)
if return_tensors is not None:
__snake_case = padded_inputs.convert_to_tensors(lowercase_)
return padded_inputs
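# The utterance-level CMVN applied above is plain per-utterance standardization of the fbank
# features; a minimal numpy sketch of the same step (shapes are illustrative):
#   x = np.random.randn(100, 80).astype(np.float32)   # (num_frames, num_mel_bins)
#   x = (x - x.mean(axis=0)) / x.std(axis=0)          # zero mean, unit variance per mel bin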
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = 3_2 , lowercase_=PILImageResampling.BILINEAR , lowercase_ = True , **lowercase_ , ) -> None:
__snake_case = do_resize
__snake_case = do_rescale
__snake_case = size_divisor
__snake_case = resample
super().__init__(**lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
__snake_case , __snake_case = get_image_size(lowercase_)
# Rounds the height and width down to the closest multiple of size_divisor
__snake_case = height // size_divisor * size_divisor
__snake_case = width // size_divisor * size_divisor
__snake_case = resize(lowercase_ , (new_h, new_w) , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
return image
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(image=lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> BatchFeature:
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = size_divisor if size_divisor is not None else self.size_divisor
__snake_case = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing')
__snake_case = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError('Invalid image(s)')
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(lowercase_) for img in images]
if do_resize:
__snake_case = [self.resize(lowercase_ , size_divisor=lowercase_ , resample=lowercase_) for image in images]
if do_rescale:
__snake_case = [self.rescale(lowercase_ , scale=1 / 2_5_5) for image in images]
__snake_case = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
__snake_case = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
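# The resize above floors both spatial dims to the nearest multiple of size_divisor: with the
# default size_divisor=32, an input of height 817 and width 480 comes out as 800 x 480, since
# 817 // 32 * 32 == 800 and 480 is already a multiple of 32.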
| 676 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ) -> None:
    '''Convert an original Swin2SR checkpoint to the HF format and verify the outputs.'''
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict" )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}" )
        processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 1 |
UpperCAmelCase__ : List[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
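# With the lazy-import pattern above, the torch-only symbols are registered in
# `_import_structure` only when torch is available, so importing the package
# stays cheap and optional backends fail lazily at attribute access time.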
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase = '''CLIPImageProcessor'''
__UpperCAmelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 676 |
from __future__ import annotations
class __lowercase :
    def __init__( self , data) -> None:
        self.data = data
        self.left = None
        self.right = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
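# A binary tree is "full" when every node has either zero or two children; the
# recursion above checks exactly that at each node.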
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
    # build a small full binary tree (illustrative shape)
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.right.left = Node(6 )
    tree.right.right = Node(7 )
    tree.left.left.left = Node(8 )
    tree.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
| 676 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = IFInpaintingPipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _a ( self) -> Optional[Any]:
return self._get_dummy_components()
    def _a ( self , device , seed=0) -> Any:
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def _a ( self) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _a ( self) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def _a ( self) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def _a ( self) -> Optional[Any]:
self._test_save_load_local()
def _a ( self) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 676 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone , use_pretrained_backbone , dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 1 |
ENERGY_CONVERSION : dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion( from_type : str , to_type : str , value : float ) -> float:
    '''Convert between the energy units listed in ENERGY_CONVERSION (factors are joules per unit).'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
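# Illustrative checks (values follow directly from the table above):
#   energy_conversion("joule", "kilojoule", 1000) == 1.0
#   energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0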
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
from maths.prime_check import is_prime
def twin_prime( number : int ) -> int:
    '''Return number + 2 when (number, number + 2) is a twin-prime pair, else -1.'''
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
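# Examples: twin_prime(3) == 5 (3 and 5 are both prime), twin_prime(4) == -1
# (4 is not prime), twin_prime(7) == -1 (9 is not prime).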
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
    def __init__( self , embedding_dim = 7_6_8 , ) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim))
        self.std = nn.Parameter(torch.ones(1 , embedding_dim))
    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self
    def scale( self , embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
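# The two transforms above are exact inverses of each other: standardizing with
# (x - mean) / std and then applying (x * std) + mean recovers the original embeds.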
| 676 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id , path , revision ) -> None:
    '''Check that hf_hub_url builds the expected dataset resolve URL.'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
| 676 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key( k ) -> str:
    '''Translate a Pegasus TF variable name into the matching HF state-dict key.'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
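# For example, an illustrative TF variable name "encoder/layer_0/ffn/dense_1/kernel"
# is mapped step by step ("/"->".", "r.layer_"->"r.layers.", "ffn.dense_1."->"fc2.",
# "kernel"->"weight") to the HF key "encoder.layers.0.fc2.weight".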
def convert_pegasus( tf_weights : dict , cfg_updates : dict ) -> PegasusForConditionalGeneration:
    '''Instantiate a PegasusForConditionalGeneration and load the converted TF weights into it.'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping["encoder.embed_tokens.weight"] = mapping['shared.weight']
    mapping["decoder.embed_tokens.weight"] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
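# tf.train.list_variables yields (name, shape) pairs; Adafactor slots and the
# global step are optimizer state rather than model weights, hence the skip above.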
def convert_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str ) -> None:
    '''Convert a Pegasus TF checkpoint and save tokenizer and model to save_dir.'''
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''beit'''
    def __init__( self , vocab_size=8_1_9_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def _a ( self) -> float:
return 1e-4
| 676 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> None:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
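    # __call__ below first transcribes the input audio with Whisper and then runs a
    # standard Stable Diffusion text-to-image loop on the transcription.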
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        input_features = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(input_features , max_length=4_8_0_0_0_0)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True)[
            0
        ]
        if isinstance(prompt , str):
            batch_size = 1
        elif isinstance(prompt , list):
            batch_size = len(prompt)
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    F" {type(prompt)}.")
            elif isinstance(negative_prompt , str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents)
        latents = 1 / 0.1_8215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None)
| 676 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( lowerCamelCase__ ):
    def __init__( self , unet , scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet , scheduler=scheduler)
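    # Note: DDIMScheduler.from_config accepts the config of a compatible scheduler
    # (e.g. one used during DDPM training), so callers may pass a different
    # scheduler and it is converted to DDIM here.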
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 5_0 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list) and len(generator) != batch_size:
            raise ValueError(
                F"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                F" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image , t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1)
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs) -> None:
        super().__init__(*args , **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
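    # The two overrides below mirror Trainer.evaluate/predict but route the generated
    # predictions through `post_process_function` before computing metrics
    # ("predict with generate" for seq2seq QA tasks).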
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''Build the (old, new) key pairs for renaming timm DeiT weights.'''
    rename_keys = []
    for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
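# For example, "blocks.0.norm1.weight" maps to
# "deit.encoder.layer.0.layernorm_before.weight" (or "encoder.layer.0...." when
# base_model strips the "deit" prefix).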
def read_in_q_k_v( state_dict , config , base_model=False ) -> None:
    '''Split each fused timm qkv projection into separate query/key/value tensors.'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key( dct , old , new ) -> None:
    '''Move dct[old] to dct[new].'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''Download the standard COCO test image used for verification.'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ) -> None:
    '''Convert a timm DeiT checkpoint to the HF format and verify the logits.'''
    config = DeiTConfig()
# all deit models have fine-tuned heads
    base_model = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
# size of the architecture
    if deit_name[9:].startswith('tiny' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small' ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base' ):
        pass
    elif deit_name[4:].startswith('large' ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 676 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    '''A*-style grid search; returns the path from init to goal and the action grid.'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
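# The driver below uses a Manhattan-distance heuristic with a large (99) penalty on
# obstacle cells, which biases expansion toward the goal while steering around walls.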
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''new-model'''
if is_tf_available():
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = NewModelConfig
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> List[Any]:
__snake_case = 'bert-base-cased'
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Any:
__snake_case = 'bert-base-cased'
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForPreTraining.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Any:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForCausalLM.from_pretrained(lowercase_)
__snake_case , __snake_case = TFAutoModelForCausalLM.from_pretrained(lowercase_ , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelWithLMHead.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForMaskedLM.from_pretrained(lowercase_)
__snake_case , __snake_case = TFAutoModelForMaskedLM.from_pretrained(lowercase_ , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Tuple:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase_)
__snake_case , __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase_ , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForSequenceClassification.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
def _a ( self) -> int:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForQuestionAnswering.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_tensorflow_probability
def _a ( self) -> str:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case = AutoConfig.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase_)
__snake_case , __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase_ , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = TFAutoModelWithLMHead.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=lowercase_) , 1_4_4_1_0)
def _a ( self) -> str:
__snake_case = TFAutoModelWithLMHead.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=lowercase_) , 1_4_4_1_0)
def _a ( self) -> int:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__snake_case = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(lowercase_ , lowercase_)
__snake_case = copy.deepcopy(model.config)
__snake_case = ['FunnelBaseModel']
__snake_case = TFAutoModel.from_config(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_)
__snake_case = TFAutoModel.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Union[str, Any]:
try:
AutoConfig.register('new-model' , lowercase_)
__snake_case = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(lowercase_):
auto_class.register(lowercase_ , lowercase_)
auto_class.register(lowercase_ , lowercase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
auto_class.register(lowercase_ , lowercase_)
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = BertModelTester(self).get_config()
__snake_case = NewModelConfig(**tiny_config.to_dict())
__snake_case = auto_class.from_config(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_)
__snake_case = auto_class.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _a ( self) -> List[str]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier'):
__snake_case = TFAutoModel.from_pretrained('bert-base')
def _a ( self) -> Any:
with self.assertRaisesRegex(
lowercase_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case = TFAutoModel.from_pretrained(lowercase_ , revision='aaaaaa')
def _a ( self) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
__snake_case = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def _a ( self) -> List[Any]:
with self.assertRaisesRegex(lowercase_ , 'Use `from_pt=True` to load this model'):
__snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def _a ( self) -> int:
# Make sure we have cached the model.
__snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
__snake_case = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str(Path('..') / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 1 |
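# A minimal sketch of the doctest-to-unittest bridge exercised above:
# DocTestSuite collects a module's doctests into a unittest suite which a
# TextTestRunner can execute (json is an arbitrary stdlib module whose
# docstring contains doctest examples).
import doctest
import json
import unittest

suite = doctest.DocTestSuite(json)
unittest.TextTestRunner().run(suite)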
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str(Path('..') / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 676 | 1 |
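# An alternative memoization sketch (hypothetical helper, not part of the
# sample above) that replaces the hand-rolled dp_array with
# functools.lru_cache; the item collection must be hashable, hence the tuple.
from functools import lru_cache


def combination_sum_iv_lru(array: tuple[int, ...], target: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)

    return count(target)


# 9 ordered ways to write 5 using 1, 2 and 5 (e.g. 1+2+2, 2+2+1, 5, ...)
assert combination_sum_iv_lru((1, 2, 5), 5) == 9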
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 676 |
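# A quick property-style check (illustrative snippet, assuming the fixed
# odd_even_transposition above is importable) comparing the sort against
# Python's built-in sorted() on random inputs.
import random


def check_odd_even_transposition(trials: int = 100) -> None:
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert odd_even_transposition(list(data)) == sorted(data)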
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs(mockfs) -> None:
    '''simple docstring'''
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 1 |
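# A raw-faiss sketch (assumes the faiss-cpu package is installed) of the
# inner-product search that the FaissIndex wrapper exercised above builds on.
import numpy as np
import faiss

index = faiss.IndexFlatIP(5)            # inner-product index over 5-dim vectors
index.add(np.eye(5, dtype=np.float32))  # five one-hot vectors
query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, ids = index.search(query, 1)    # top-1 neighbour
assert ids[0, 0] == 1 and scores[0, 0] > 0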
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    '''simple docstring'''
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"{i}" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text


def main(args):
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 1 |
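# Illustrative behaviour of the normalize_text helper above (assumes it is
# importable from the script): ignore-regex punctuation is stripped and the
# text is lower-cased before WER/CER are computed; newlines collapse to spaces.
assert normalize_text('Hello, WORLD!') == 'hello world'
assert normalize_text('one\n\ntwo') == 'one two'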
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 676 |
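# Quick sanity sketch (hypothetical values, assuming the config class above)
# for the derived feature size: input_size * len(lags_sequence) plus the
# embedding/static/time features from _number_of_features.
config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
# default lags_sequence has 7 entries; loc/scale scaling adds input_size * 2
assert config.feature_size == 1 * 7 + 2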
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    '''simple docstring'''
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 1 |
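# A CPU-only variant sketch of the same sanity check using the gloo backend;
# handy for debugging rendezvous without GPUs (single process, env:// init,
# all values here are illustrative).
import os

import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
os.environ.setdefault("RANK", "0")
os.environ.setdefault("WORLD_SIZE", "1")
dist.init_process_group("gloo")
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == dist.get_world_size()
dist.destroy_process_group()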
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 676 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''simple docstring'''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676 |
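# Sanity sketch for solution() above: the candidates are differences of
# consecutive cubes (7, 19, 37, 61, 91 below 100), and all but 91 (= 7 * 13)
# are prime.
assert solution(100) == 4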
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
| 676 | 1 |
from __future__ import annotations
UpperCAmelCase__ : List[Any] = 1.6_021e-19 # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
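# Example usage of the function above: with conductivity unknown, the
# remaining quantity sigma = n * mu * q is returned
# (here 1e20 * 0.01 * 1.6021e-19).
import math

name, value = electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01)
assert name == "conductivity" and math.isclose(value, 0.16021)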
def is_automorphic_number(number: int) -> bool:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
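# The automorphic numbers below 1000 (their squares end in the number itself,
# e.g. 76**2 = 5776), using the function above:
assert [n for n in range(1000) if is_automorphic_number(n)] == [0, 1, 5, 6, 25, 76, 376, 625]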
from __future__ import annotations
from random import choice
def random_pivot(lst):
    '''simple docstring'''
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
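# Example usage of kth_number above (1-indexed k-th smallest, expected O(n));
# note the partition drops values equal to the pivot, so inputs should have
# distinct elements.
assert kth_number([2, 1, 3, 4, 5], 1) == 1
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([2, 1, 3, 4, 5], 5) == 5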
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
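# SiLU (swish) sanity sketch using the functions above: the unit is zero at
# zero and close to the identity for large positive inputs.
import numpy as np

out = sigmoid_linear_unit(np.array([-1.0, 0.0, 5.0]))
assert out[1] == 0.0
assert np.isclose(out[2], 5.0 * sigmoid(np.array(5.0)))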
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
| 676 |
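# Minimal usage sketch of the generic SegmentTree above (assuming it is
# importable) with max as the combining function; query bounds are inclusive
# and zero-based.
st = SegmentTree([3, 1, 4, 1, 5], max)
assert st.query(1, 3) == 4
st.update(2, 0)
assert st.query(1, 3) == 1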
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 676 | 1 |
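# Cross-check of lucas_lehmer_test above against the known Mersenne
# exponents: for prime p up to 31, 2**p - 1 is prime exactly when p is in
# {3, 5, 7, 13, 17, 19, 31}.
assert [p for p in [3, 5, 7, 11, 13, 17, 19, 23, 29, 31] if lucas_lehmer_test(p)] == [3, 5, 7, 13, 17, 19, 31]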
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCAmelCase__ : Optional[int] = "pt"
elif is_tf_available():
UpperCAmelCase__ : int = "tf"
else:
UpperCAmelCase__ : Union[str, Any] = "jax"
class __lowercase ( lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def _a ( self) -> List[Any]:
super().setUp()
__snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _a ( self) -> Union[str, Any]:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
def _a ( self , **lowercase_) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_)
def _a ( self , lowercase_ , lowercase_=False , lowercase_=2_0 , lowercase_=5) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case = []
for i in range(len(lowercase_)):
try:
__snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_)
except UnicodeDecodeError:
pass
toks.append((i, tok))
__snake_case = list(filter(lambda lowercase_: re.match(r'^[ a-zA-Z]+$' , t[1]) , lowercase_))
__snake_case = list(filter(lambda lowercase_: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase_) , lowercase_))
if max_length is not None and len(lowercase_) > max_length:
__snake_case = toks[:max_length]
if min_length is not None and len(lowercase_) < min_length and len(lowercase_) > 0:
while len(lowercase_) < min_length:
__snake_case = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case = [t[0] for t in toks]
# Ensure consistency
__snake_case = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_)
if " " not in output_txt and len(lowercase_) > 1:
__snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_)
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_)
)
if with_prefix_space:
__snake_case = ' ' + output_txt
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
return output_txt, output_ids
def _a ( self) -> Optional[Any]:
__snake_case = self.perceiver_tokenizer
__snake_case = 'Unicode €.'
__snake_case = tokenizer(lowercase_)
__snake_case = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , lowercase_)
# decoding
__snake_case = tokenizer.decode(lowercase_)
self.assertEqual(lowercase_ , '[CLS]Unicode €.[SEP]')
__snake_case = tokenizer('e è é ê ë')
__snake_case = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , lowercase_)
# decoding
__snake_case = tokenizer.decode(lowercase_)
self.assertEqual(lowercase_ , '[CLS]e è é ê ë[SEP]')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')) , '[CLS]e è é ê ë[SEP]')
def _a ( self) -> int:
__snake_case = self.perceiver_tokenizer
__snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
__snake_case = tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
if FRAMEWORK != "jax":
__snake_case = list(batch.input_ids.numpy()[0])
else:
__snake_case = list(batch.input_ids.tolist()[0])
self.assertListEqual(lowercase_ , lowercase_)
self.assertEqual((2, 3_8) , batch.input_ids.shape)
self.assertEqual((2, 3_8) , batch.attention_mask.shape)
def _a ( self) -> List[Any]:
__snake_case = self.perceiver_tokenizer
__snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case = tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase_)
self.assertIn('attention_mask' , lowercase_)
self.assertNotIn('decoder_input_ids' , lowercase_)
self.assertNotIn('decoder_attention_mask' , lowercase_)
def _a ( self) -> str:
__snake_case = self.perceiver_tokenizer
__snake_case = [
'Summary of the text.',
'Another summary.',
]
__snake_case = tokenizer(
text_target=lowercase_ , max_length=3_2 , padding='max_length' , truncation=lowercase_ , return_tensors=lowercase_)
self.assertEqual(3_2 , targets['input_ids'].shape[1])
def _a ( self) -> List[str]:
# safety check on max_len default value so we are sure the test works
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length , 4_2)
# Now let's start the test
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ' He is very happy, UNwant\u00E9d,running'
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
tokenizer.save_pretrained(lowercase_)
__snake_case = tokenizer.__class__.from_pretrained(lowercase_)
__snake_case = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
shutil.rmtree(lowercase_)
__snake_case = self.get_tokenizers(model_max_length=4_2)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case = tempfile.mkdtemp()
__snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'])
__snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token')
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
tokenizer.save_pretrained(lowercase_)
__snake_case = tokenizer.__class__.from_pretrained(lowercase_)
__snake_case = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 4_2)
__snake_case = tokenizer.__class__.from_pretrained(lowercase_ , model_max_length=4_3)
self.assertEqual(tokenizer.model_max_length , 4_3)
shutil.rmtree(lowercase_)
def _a ( self) -> Optional[int]:
__snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_)
with open(os.path.join(lowercase_ , 'special_tokens_map.json') , encoding='utf-8') as json_file:
__snake_case = json.load(lowercase_)
with open(os.path.join(lowercase_ , 'tokenizer_config.json') , encoding='utf-8') as json_file:
__snake_case = json.load(lowercase_)
__snake_case = [F"<extra_id_{i}>" for i in range(1_2_5)]
__snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase_ , 'special_tokens_map.json') , 'w' , encoding='utf-8') as outfile:
json.dump(lowercase_ , lowercase_)
with open(os.path.join(lowercase_ , 'tokenizer_config.json') , 'w' , encoding='utf-8') as outfile:
json.dump(lowercase_ , lowercase_)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case = tokenizer_class.from_pretrained(
lowercase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens)
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase_)]
__snake_case = tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens)
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])) , )
def _a ( self) -> Union[str, Any]:
__snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8]) , '�')
def _a ( self) -> Optional[int]:
pass
def _a ( self) -> int:
pass
def _a ( self) -> Union[str, Any]:
pass
def _a ( self) -> List[Any]:
pass
def _a ( self) -> Optional[int]:
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
# one-character strings and special added tokens as tokens
__snake_case = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case = tokenizer.convert_tokens_to_string(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''altclip_text_model'''
def __init__( self , lowercase_=2_5_0_0_0_2 , lowercase_=1_0_2_4 , lowercase_=2_4 , lowercase_=1_6 , lowercase_=4_0_9_6 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_4 , lowercase_=1 , lowercase_=0.02 , lowercase_=0.02 , lowercase_=1e-05 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_="absolute" , lowercase_=True , lowercase_=7_6_8 , **lowercase_ , ) -> int:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = initializer_factor
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = project_dim
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''altclip_vision_model'''
def __init__( self , lowercase_=7_6_8 , lowercase_=3_0_7_2 , lowercase_=5_1_2 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3 , lowercase_=2_2_4 , lowercase_=3_2 , lowercase_="quick_gelu" , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , **lowercase_ , ) -> List[str]:
super().__init__(**lowercase_)
__snake_case = hidden_size
__snake_case = intermediate_size
__snake_case = projection_dim
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = num_channels
__snake_case = patch_size
__snake_case = image_size
__snake_case = initializer_range
__snake_case = initializer_factor
__snake_case = attention_dropout
__snake_case = layer_norm_eps
__snake_case = hidden_act
@classmethod
def _a ( cls , lowercase_ , **lowercase_) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_)
__snake_case , __snake_case = cls.get_config_dict(lowercase_ , **lowercase_)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type') == "altclip":
__snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowercase_ , **lowercase_)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''altclip'''
__UpperCAmelCase = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=7_6_8 , lowercase_=2.6592 , **lowercase_) -> List[str]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__snake_case = kwargs.pop('text_config_dict' , lowercase_)
__snake_case = kwargs.pop('vision_config_dict' , lowercase_)
super().__init__(**lowercase_)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__snake_case = {}
# This is the complete result when using `text_config_dict`.
__snake_case = AltCLIPTextConfig(**lowercase_).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__snake_case = (
F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
F"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__snake_case = (
F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
F"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(lowercase_)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
__snake_case = {}
# This is the complete result when using `vision_config_dict`.
__snake_case = AltCLIPVisionConfig(**lowercase_).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__snake_case = {
str(lowercase_): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__snake_case = (
F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__snake_case = (
F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
F"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(lowercase_)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
__snake_case = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')
if vision_config is None:
__snake_case = {}
logger.info('`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.')
__snake_case = AltCLIPTextConfig(**lowercase_)
__snake_case = AltCLIPVisionConfig(**lowercase_)
__snake_case = projection_dim
__snake_case = logit_scale_init_value
__snake_case = 1.0
@classmethod
def _a ( cls , lowercase_ , lowercase_ , **lowercase_) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_)
def _a ( self) -> Any:
__snake_case = copy.deepcopy(self.__dict__)
__snake_case = self.text_config.to_dict()
__snake_case = self.vision_config.to_dict()
__snake_case = self.__class__.model_type
return output
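# Usage sketch (illustrative, assuming the upstream `transformers` names): these classes are
# exposed there as AltCLIPTextConfig, AltCLIPVisionConfig and AltCLIPConfig, and a composite
# config can be built with the classmethod defined above (upstream: `from_text_vision_configs`):
# config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())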
| 676 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Tuple = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''yolos'''
def __init__( self , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=[5_1_2, 8_6_4] , lowercase_=1_6 , lowercase_=3 , lowercase_=True , lowercase_=1_0_0 , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_)
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = qkv_bias
__snake_case = num_detection_tokens
__snake_case = use_mid_position_embeddings
__snake_case = auxiliary_loss
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def _a ( self) -> float:
return 1e-4
@property
def _a ( self) -> int:
return 1_2
| 676 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
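# Illustrative walk-through (hypothetical key): the replacements above map the original
# checkpoint key "layers.0.residual_group.blocks.1.attn.proj.weight" to
# "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight".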
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
UpperCAmelCase__ : Dict = logging.getLogger(__name__)
def A ( snake_case__ : str ) -> List[str]:
'''simple docstring'''
__snake_case = git.Repo(search_parent_directories=snake_case__ )
__snake_case = {
'repo_id': str(snake_case__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(snake_case__ , 'git_log.json' ) , 'w' ) as f:
json.dump(snake_case__ , snake_case__ , indent=4 )
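# The resulting git_log.json looks roughly like this (illustrative values):
# {"repo_id": "/path/to/repo", "repo_sha": "3f2c1d9...", "repo_branch": "main"}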
def A ( snake_case__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if params.n_gpu <= 0:
__snake_case = 0
__snake_case = -1
__snake_case = True
__snake_case = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
__snake_case = int(os.environ['WORLD_SIZE'] )
__snake_case = int(os.environ['N_GPU_NODE'] )
__snake_case = int(os.environ['RANK'] )
# number of nodes / node ID
__snake_case = params.world_size // params.n_gpu_per_node
__snake_case = params.global_rank // params.n_gpu_per_node
__snake_case = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
__snake_case = 1
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 1
__snake_case = 1
__snake_case = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__snake_case = params.node_id == 0 and params.local_rank == 0
__snake_case = params.n_nodes > 1
# summary
__snake_case = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
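# Note: WORLD_SIZE, N_GPU_NODE, RANK, N_NODES and NODE_RANK are read from the environment above;
# they are assumed to be exported by the distributed launcher or job script before training starts.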
def A ( snake_case__ : Dict ) -> Tuple:
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
import math
import random
def A ( snake_case__ : float , snake_case__ : bool = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
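# Note: with deriv=True the caller is expected to pass an already-activated value,
# relying on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).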
# Initial Value
UpperCAmelCase__ : List[str] = 0.02
def A ( snake_case__ : int , snake_case__ : int ) -> float:
'''simple docstring'''
__snake_case = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
__snake_case = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__snake_case = (expected / 100) - layer_a
# Error delta
__snake_case = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : Dict = int(input("Expected value: "))
UpperCAmelCase__ : Optional[Any] = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 676 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In-order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 1 |
def A ( snake_case__ : List[str] , snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def A ( snake_case__ : Dict , snake_case__ : List[Any] ) -> int:
'''simple docstring'''
__snake_case = [[float('inf' ) for _ in range(snake_case__ )] for _ in range(snake_case__ )]
for i in range(snake_case__ ):
for j in range(snake_case__ ):
__snake_case = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(snake_case__ ):
# looping through rows of graph array
for i in range(snake_case__ ):
# looping through columns of graph array
for j in range(snake_case__ ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__snake_case = dist[i][k] + dist[k][j]
_print_dist(snake_case__ , snake_case__ )
return dist, v
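# Floyd-Warshall runs in O(V^3) time and O(V^2) space: after the k-th outer iteration,
# dist[i][j] holds the shortest i -> j distance using only intermediate vertices 0..k.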
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = int(input("Enter number of vertices: "))
UpperCAmelCase__ : int = int(input("Enter number of edges: "))
UpperCAmelCase__ : Optional[int] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
UpperCAmelCase__ : List[str] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
UpperCAmelCase__ : Dict = int(input("Enter source:"))
UpperCAmelCase__ : Optional[int] = int(input("Enter destination:"))
UpperCAmelCase__ : List[str] = float(input("Enter weight:"))
UpperCAmelCase__ : Optional[int] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 676 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def A ( snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
__snake_case = [False] * len(snake_case__ )
__snake_case = [-1] * len(snake_case__ )
def dfs(snake_case__ : Tuple , snake_case__ : Optional[int] ):
__snake_case = True
__snake_case = c
for u in graph[v]:
if not visited[u]:
dfs(snake_case__ , 1 - c )
for i in range(len(snake_case__ ) ):
if not visited[i]:
dfs(snake_case__ , 0 )
for i in range(len(snake_case__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase__ : Any = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
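# Conversely, any graph with an odd cycle is not bipartite; e.g. the triangle
# {0: [1, 2], 1: [0, 2], 2: [0, 1]} would make check_bipartite_dfs return False.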
| 676 |
from maths.prime_check import is_prime
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
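# For example, an input of 5 yields 7 (5 and 7 are twin primes), while 8 yields -1.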
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> List[str]:
__snake_case = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
__snake_case = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !" ("I love camembert!")
__snake_case = model(lowercase_)['last_hidden_state']
__snake_case = tf.TensorShape((1, 1_0, 7_6_8))
self.assertEqual(output.shape , lowercase_)
# compare the actual values for a slice.
__snake_case = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 676 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}"
| 676 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ : Dict = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
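# _LazyModule defers the real imports until an attribute is first accessed, which keeps
# `import transformers` fast when the optional backend is not actually used.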
| 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
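# Illustrative example (hypothetical key): "encoder/attention/output_proj/kernel"
# becomes "encoder.attn.out_proj.weight" after the PATTERNS replacements above.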
def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
__snake_case = PegasusConfig(**snake_case__ )
__snake_case = PegasusForConditionalGeneration(snake_case__ )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__snake_case = mapping['shared.weight']
__snake_case = mapping['shared.weight']
__snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
__snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
__snake_case = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def A ( snake_case__ : str , snake_case__ : str ) -> Tuple:
'''simple docstring'''
# save tokenizer first
__snake_case = Path(snake_case__ ).parent.name
__snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
__snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
__snake_case = get_tf_weights_as_numpy(snake_case__ )
__snake_case = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
__snake_case = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
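# A minimal usage sketch for the pipeline above. The checkpoint id and the
# `custom_pipeline` name are assumptions (this file only defines the class);
# `raw_audio` would be a 1-D waveform array sampled at 16 kHz:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion"
#   )
#   image = pipe(raw_audio, sampling_rate=16_000).images[0]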
| 676 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def A ( snake_case__ : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case = args.pruning_method
__snake_case = args.threshold
__snake_case = args.model_name_or_path.rstrip('/' )
__snake_case = args.target_model_path
print(f"Load fine-pruned model from {model_name_or_path}" )
__snake_case = torch.load(os.path.join(snake_case__ , 'pytorch_model.bin' ) )
__snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__snake_case = tensor
print(f"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
__snake_case = tensor
print(f"Copied layer {name}" )
elif "bias" in name:
__snake_case = tensor
print(f"Copied layer {name}" )
else:
if pruning_method == "magnitude":
__snake_case = MagnitudeBinarizer.apply(inputs=snake_case__ , threshold=snake_case__ )
__snake_case = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__snake_case = name[:-6]
__snake_case = model[f"{prefix_}mask_scores"]
__snake_case = TopKBinarizer.apply(snake_case__ , snake_case__ )
__snake_case = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__snake_case = name[:-6]
__snake_case = model[f"{prefix_}mask_scores"]
__snake_case = ThresholdBinarizer.apply(snake_case__ , snake_case__ , snake_case__ )
__snake_case = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__snake_case = name[:-6]
__snake_case = model[f"{prefix_}mask_scores"]
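# Hard-concrete ("stretched sigmoid") gate used for L0 regularization below:
# sigmoid the mask scores, rescale from (0, 1) to (l, r) = (-0.1, 1.1), then
# clamp back into [0, 1] before multiplying into the weight tensor.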
__snake_case , __snake_case = -0.1, 1.1
__snake_case = torch.sigmoid(snake_case__ )
__snake_case = s * (r - l) + l
__snake_case = s_bar.clamp(min=0.0 , max=1.0 )
__snake_case = tensor * mask
print(f"Pruned layer {name}" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
__snake_case = os.path.join(
os.path.dirname(snake_case__ ) , f"bertarized_{os.path.basename(snake_case__ )}" )
if not os.path.isdir(snake_case__ ):
shutil.copytree(snake_case__ , snake_case__ )
print(f"\nCreated folder {target_model_path}" )
torch.save(snake_case__ , os.path.join(snake_case__ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
UpperCAmelCase__ : List[str] = parser.parse_args()
main(args)
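# Example invocation (the script name and paths are illustrative; for `topK`,
# --threshold 0.10 keeps the top 10% of weights by movement score):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path serialization_dir/fine_pruned_model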
| 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
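# Usage sketch (assumes a trainer built elsewhere with `eval_examples` and a
# `post_process_function`; extra generation kwargs flow through `gen_kwargs`):
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
#   outputs = trainer.predict(predict_dataset, predict_examples, metric_key_prefix="predict")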
| 676 | 1 |
from __future__ import annotations
def A ( snake_case__ : int ) -> list[int]:
'''simple docstring'''
__snake_case = [True] * limit
__snake_case = False
__snake_case = False
__snake_case = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__snake_case = i * 2
while index < limit:
__snake_case = False
__snake_case = index + i
__snake_case = [2]
for i in range(3 , snake_case__ , 2 ):
if is_prime[i]:
primes.append(snake_case__ )
return primes
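# Hand-checked example for the sieve above:
#   prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]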
def A ( snake_case__ : int = 100_0000 ) -> int:
'''simple docstring'''
__snake_case = prime_sieve(snake_case__ )
__snake_case = 0
__snake_case = 0
for i in range(len(snake_case__ ) ):
for j in range(i + length , len(snake_case__ ) ):
__snake_case = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__snake_case = j - i
__snake_case = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
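# Each value stored in the action grid below is an index into DIRECTIONS,
# recording the move used to enter a cell; the path is rebuilt by walking
# these indices backwards from the goal.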
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
__snake_case = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase__ : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def A ( snake_case__ : Dict="no" , snake_case__ : str = default_json_config_file , snake_case__ : bool = False ) -> Optional[int]:
'''simple docstring'''
__snake_case = Path(snake_case__ )
path.parent.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
if path.exists():
print(
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
__snake_case = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
__snake_case = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__snake_case = torch.cuda.device_count()
__snake_case = num_gpus
__snake_case = False
if num_gpus > 1:
__snake_case = 'MULTI_GPU'
else:
__snake_case = 'NO'
elif is_xpu_available() and use_xpu:
__snake_case = torch.xpu.device_count()
__snake_case = num_xpus
__snake_case = False
if num_xpus > 1:
__snake_case = 'MULTI_XPU'
else:
__snake_case = 'NO'
elif is_npu_available():
__snake_case = torch.npu.device_count()
__snake_case = num_npus
__snake_case = False
if num_npus > 1:
__snake_case = 'MULTI_NPU'
else:
__snake_case = 'NO'
else:
__snake_case = 0
__snake_case = True
__snake_case = 1
__snake_case = 'NO'
__snake_case = ClusterConfig(**snake_case__ )
config.to_json_file(snake_case__ )
return path
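# Illustrative file contents for a single-GPU machine (the JSON field names
# are an assumption based on accelerate's ClusterConfig; the exact output also
# includes its defaults):
#   {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "fp16",
#    "distributed_type": "NO", "num_processes": 1, "use_cpu": false}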
def A ( snake_case__ : Dict , snake_case__ : Dict ) -> Any:
'''simple docstring'''
__snake_case = parser.add_parser('default' , parents=snake_case__ , help=snake_case__ , formatter_class=snake_case__ )
parser.add_argument(
'--config_file' , default=snake_case__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=snake_case__ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=snake_case__ )
return parser
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"accelerate configuration saved at {config_file}" )
| 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
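# How the filters above compose: `identifier` keeps only matching files,
# `n_identifier` drops matches, and `only_modules` switches between importing
# each file as a module for doctest.DocTestSuite and running doctest.testfile
# on the raw file.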
| 676 | 1 |
import functools
from typing import Any
def A ( snake_case__ : str , snake_case__ : list[str] ) -> bool:
'''simple docstring'''
# Validation
if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(snake_case__ , snake_case__ ) or not all(
isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
__snake_case = {}
__snake_case = 'WORD_KEEPER'
for word in words:
__snake_case = trie
for c in word:
if c not in trie_node:
__snake_case = {}
__snake_case = trie_node[c]
__snake_case = True
__snake_case = len(snake_case__ )
# Dynamic programming method
@functools.cache
def is_breakable(snake_case__ : int ) -> bool:
if index == len_string:
return True
__snake_case = trie
for i in range(snake_case__ , snake_case__ ):
__snake_case = trie_node.get(string[i] , snake_case__ )
if trie_node is None:
return False
if trie_node.get(snake_case__ , snake_case__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
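# Hand-checked examples for the word-break check above:
#   "applepenapple" with ["apple", "pen"] -> True
#   "catsandog" with ["cats", "dog", "sand", "and", "cat"] -> False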
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations(snake_case__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
snake_case__ : int , snake_case__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__snake_case = answer
return answer
__snake_case = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = [0] * (target + 1)
__snake_case = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
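# All three variants agree; e.g. target 5 with items [1, 2, 5] has 9 ordered
# combinations (hand-enumerated): 1+1+1+1+1, the 4 orderings of {1,1,1,2},
# the 3 orderings of {1,2,2}, and 5 itself.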
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''trocr'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , lowercase_=5_0_2_6_5 , lowercase_=1_0_2_4 , lowercase_=1_2 , lowercase_=1_6 , lowercase_=4_0_9_6 , lowercase_="gelu" , lowercase_=5_1_2 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=2 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ) -> int:
__snake_case = vocab_size
__snake_case = d_model
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = activation_function
__snake_case = max_position_embeddings
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = init_std
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = scale_embedding
__snake_case = use_learned_position_embeddings
__snake_case = layernorm_embedding
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
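# Minimal instantiation sketch (assumes the class is exported as TrOCRConfig,
# as in transformers; all arguments fall back to the defaults in __init__):
#   config = TrOCRConfig(vocab_size=50_265, d_model=1_024, decoder_layers=12)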
| 676 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
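# The pattern exercised above: build an index over float32 vectors, then query
# with a single vector of shape (d,) or a batch of shape (n, d):
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, indices = index.search(np.ones(5, dtype=np.float32))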
| 676 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
UpperCAmelCase__ : List[str] = logging.getLogger(__name__)
def A ( ) -> Optional[int]:
'''simple docstring'''
__snake_case = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=snake_case__ , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=snake_case__ , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=snake_case__ , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=snake_case__ , default='data/dump' , help='The dump file prefix.' )
__snake_case = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
__snake_case = BertTokenizer.from_pretrained(args.tokenizer_name )
__snake_case = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
__snake_case = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
__snake_case = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__snake_case = tokenizer.special_tokens_map['cls_token'] # `<s>`
__snake_case = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
__snake_case = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__snake_case = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
__snake_case = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
__snake_case = fp.readlines()
logger.info('Start encoding' )
logger.info(f"{len(snake_case__ )} examples to process." )
__snake_case = []
__snake_case = 0
__snake_case = 1_0000
__snake_case = time.time()
for text in data:
__snake_case = f"{bos} {text.strip()} {sep}"
__snake_case = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
rslt.append(snake_case__ )
iter += 1
if iter % interval == 0:
__snake_case = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
__snake_case = time.time()
logger.info('Finished binarization' )
logger.info(f"{len(snake_case__ )} examples processed." )
__snake_case = f"{args.dump_file}.{args.tokenizer_name}.pickle"
__snake_case = tokenizer.vocab_size
if vocab_size < (1 << 16):
__snake_case = [np.uintaa(snake_case__ ) for d in rslt]
else:
__snake_case = [np.intaa(snake_case__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(snake_case__ , 'wb' ) as handle:
pickle.dump(rslt_ , snake_case__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
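# Example invocation (script name and paths are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text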
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
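# Example invocation (script name, model and dataset identifiers are illustrative):
#   python eval.py --model_id <hub-model-id> --dataset mozilla-foundation/common_voice_7_0 \
#       --config pt --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs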
| 676 | 1 |