import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
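# Example usage (added for illustration; not part of the original module — it
# assumes an environment where `transformers` is installed):
#
#   from transformers import DistilBertTokenizerFast
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoding = tokenizer("Hello world!", return_tensors="pt")
#   print(encoding["input_ids"])  # [CLS] ... [SEP] token ids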
def binary_or(a: int, b: int) -> str:
    """
    Take in two integers, convert them to binary, and return a binary string
    that is the result of a bitwise OR on the inputs. Raises ValueError for
    negative inputs.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
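# Quick sanity check (added; not in the original file): the string-based OR
# should agree with Python's built-in bitwise `|` operator.
assert binary_or(25, 32) == bin(25 | 32)
assert binary_or(37, 50) == bin(37 | 50)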
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
from string import ascii_uppercase

char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
idx_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message by shifting each letter backwards by its key letter."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_idx[letter] - char_to_idx[key_new[i]]) % 26
            i += 1
            encrypted += idx_to_char[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt the cipher text by shifting each letter forwards by its key letter."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_idx[letter] + char_to_idx[key_new[i]] + 26) % 26
            i += 1
            or_txt += idx_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency (magnitude) response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
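# Example usage (added for illustration; not part of the original module).
# The identity "filter" below just passes samples through, so its measured
# frequency response is flat at 0 dB; the 48000 Hz samplerate is arbitrary.
#
#   class PassThroughFilter:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(PassThroughFilter(), 48000)
#   show_phase_response(PassThroughFilter(), 48000)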
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union areas between a prediction and its ground truth."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    pred_label = pred_label[mask]
    label = label[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union areas over a batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy, and overall accuracy from accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
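# Example usage (added for illustration; not part of the original module — it
# assumes `transformers` and `sentencepiece` are installed):
#
#   from transformers import MBart50Tokenizer
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # input_ids begin with the source language code and end with </s>.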
def hamming(n_element: int) -> list:
    """
    Return an ordered list of the first `n_element` Hamming numbers
    (numbers of the form 2^i * 3^j * 5^k). `n_element` must be positive.

    >>> hamming(5)
    [1, 2, 3, 4, 5]
    >>> hamming(10)
    [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
    """
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("a should be a positive number")

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
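# Example usage (added for illustration; not part of the original module — it
# assumes an environment where `transformers` is installed):
#
#   from transformers import RealmConfig
#   config = RealmConfig()  # default REALM hyper-parameters
#   small = RealmConfig(hidden_size=256, num_hidden_layers=4)  # a smaller variant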
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst` by divide and conquer.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
lowerCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
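# Hedged invocation sketch (hypothetical file names; the flag spellings
# assume argparse's usual dash form for the OPTS attributes used above):
#
#     python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-image-dir plots/ --out-file eval.json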
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while left <= right:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# First negative value: this element is negative and the one before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
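# Standalone worked example (hypothetical helper, not part of the module):
# given a non-increasing row, return the index of the first negative value,
# or len(row) when the row contains no negatives -- the same contract as the
# binary search above.
def _first_negative_index_sketch(row):
    left, right = 0, len(row) - 1
    if not row or row[0] < 0:
        return 0
    while left <= right:
        mid = (left + right) // 2
        # First negative: this element is negative, the one before is not.
        if row[mid] < 0 and row[mid - 1] >= 0:
            return mid
        if row[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(row)

assert _first_negative_index_sketch([4, 3, 2, -1]) == 3
assert _first_negative_index_sketch([7, 7, 6]) == 3
assert _first_negative_index_sketch([-1, -2]) == 0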
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
lowerCAmelCase = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
lowerCAmelCase = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
_lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase : Tuple = RoFormerTokenizer
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__="[UNK]" , lowercase__="[SEP]" , lowercase__="[PAD]" , lowercase__="[CLS]" , lowercase__="[MASK]" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
lowercase__ , tokenizer_file=lowercase__ , do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , tokenize_chinese_chars=lowercase__ , strip_accents=lowercase__ , **lowercase__ , )
__UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get('''lowercase''' , lowercase__) != do_lower_case
or pre_tok_state.get('''strip_accents''' , lowercase__) != strip_accents
):
__UpperCAmelCase : Optional[int] = getattr(lowercase__ , pre_tok_state.pop('''type'''))
__UpperCAmelCase : Optional[int] = do_lower_case
__UpperCAmelCase : Dict = strip_accents
__UpperCAmelCase : str = pre_tok_class(**lowercase__)
__UpperCAmelCase : Dict = do_lower_case
def __getstate__( self):
__UpperCAmelCase : str = self.__dict__.copy()
__UpperCAmelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , lowercase__):
__UpperCAmelCase : Optional[Any] = d
__UpperCAmelCase : Optional[int] = self.__dict__['''_tokenizer'''].get_vocab()
__UpperCAmelCase : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(lowercase__))
def A( self , lowercase__ , lowercase__=None):
__UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : List[str] = self._tokenizer.model.save(lowercase__ , name=lowercase__)
return tuple(lowercase__)
def A( self , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , **lowercase__ , ):
__UpperCAmelCase : Optional[int] = BertPreTokenizer()
return super().save_pretrained(lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__)
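# Hedged usage sketch (checkpoint name taken from the pretrained map above;
# downloading it needs network access and the jieba/rjieba backend):
#
#     from transformers import RoFormerTokenizerFast
#     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     print(tok("今天天气非常好。")["input_ids"])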
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
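# Minimal sketch of the lazy-import idea behind _LazyModule (hypothetical
# class, not transformers' actual implementation): attribute access triggers
# the real import only the first time a symbol is needed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports the requested symbol; import it lazily.
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(module_name)
                return getattr(module, attr)
        raise AttributeError(attr)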
| 675 | 1 |
from ...processing_utils import ProcessorMixin
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = ['''image_processor''', '''feature_extractor''']
_lowerCAmelCase : Optional[Any] = '''TvltImageProcessor'''
_lowerCAmelCase : str = '''TvltFeatureExtractor'''
def __init__( self , lowercase__ , lowercase__):
super().__init__(image_processor=lowercase__ , feature_extractor=lowercase__)
__UpperCAmelCase : Any = image_processor
__UpperCAmelCase : str = feature_extractor
def __call__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , *lowercase__ , **lowercase__ , ):
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
__UpperCAmelCase : List[str] = None
if images is not None:
__UpperCAmelCase : List[str] = self.image_processor(lowercase__ , mask_pixel=lowercase__ , *lowercase__ , **lowercase__)
if images_mixed is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(lowercase__ , is_mixed=lowercase__ , *lowercase__ , **lowercase__)
if audio is not None:
__UpperCAmelCase : List[Any] = self.feature_extractor(
lowercase__ , *lowercase__ , sampling_rate=lowercase__ , mask_audio=lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = {}
if audio is not None:
output_dict.update(lowercase__)
if images is not None:
output_dict.update(lowercase__)
if images_mixed_dict is not None:
output_dict.update(lowercase__)
return output_dict
@property
def A( self):
__UpperCAmelCase : Tuple = self.image_processor.model_input_names
__UpperCAmelCase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
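# Hedged usage sketch (TvltProcessor's call signature is as defined above;
# the array shapes are illustrative assumptions, not documented requirements):
#
#     import numpy as np
#     from transformers import TvltImageProcessor, TvltFeatureExtractor
#     processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#     video = list(np.random.randint(0, 256, (8, 3, 224, 224), dtype=np.uint8))
#     audio = np.random.randn(10_000)
#     batch = processor(images=video, audio=audio, sampling_rate=44_100)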
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and attention mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
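# Hedged usage sketch (standard transformers API; the printed numbers are the
# library defaults set in __init__ above):
#
#     from transformers import BertConfig
#     config = BertConfig()
#     print(config.hidden_size, config.num_hidden_layers)  # 768 12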
| 675 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
lowerCAmelCase = 2_048
lowerCAmelCase = 4_096
lowerCAmelCase = 42
lowerCAmelCase = os.environ.pop("""PROCESS_TRAIN""", """false""")
lowerCAmelCase = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
def choose_first(lowercase_ , lowercase_=False ):
assert isinstance(lowercase_ , lowercase_ )
if len(lowercase_ ) == 1:
__UpperCAmelCase : Any = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__UpperCAmelCase : str = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
__UpperCAmelCase : Optional[Any] = {'''id''': example['''id''']}
__UpperCAmelCase : int = example['''annotations''']
__UpperCAmelCase : Optional[Any] = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
__UpperCAmelCase : Optional[int] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[int] = ['''<cls>''']
else:
__UpperCAmelCase : Optional[int] = ['''short''']
__UpperCAmelCase : int = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
__UpperCAmelCase : Union[str, Any] = ['''long''']
__UpperCAmelCase : List[str] = choose_first(annotation['''long_answer'''] , is_long_answer=lowercase_ )
__UpperCAmelCase : Optional[int] = []
answer.update(lowercase_ )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
__UpperCAmelCase : Tuple = True
else:
__UpperCAmelCase : Any = False
__UpperCAmelCase : Tuple = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , lowercase_ ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = _get_single_answer(lowercase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__UpperCAmelCase : List[str] = example['''document''']['''tokens''']
__UpperCAmelCase : Tuple = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(lowercase_ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, this helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__UpperCAmelCase : Union[str, Any] = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__UpperCAmelCase : Dict = example['''document''']['''tokens''']
__UpperCAmelCase : str = answer['''start_token''']
__UpperCAmelCase : Dict = answer['''end_token''']
__UpperCAmelCase : Union[str, Any] = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__UpperCAmelCase : List[str] = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
__UpperCAmelCase : Dict = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
__UpperCAmelCase : Optional[int] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
__UpperCAmelCase : Tuple = ''' '''.join([old[i] for i in range(len(lowercase_ ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , lowercase_ , end='''\n''' )
print('''Old:''' , lowercase_ , end='''\n\n''' )
return {
"context": " ".join(lowercase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=2048 , lowercase_=4096 , lowercase_=True ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : int = get_context_and_ans(lowercase_ , assertion=lowercase_ )
__UpperCAmelCase : int = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__UpperCAmelCase : Dict = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
__UpperCAmelCase : Any = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__UpperCAmelCase : Any = []
__UpperCAmelCase : str = []
__UpperCAmelCase : str = input_ids[:q_len]
__UpperCAmelCase : Any = range(lowercase_ , len(lowercase_ ) , max_length - doc_stride )
for i in doc_start_indices:
__UpperCAmelCase : Optional[int] = i + max_length - q_len
__UpperCAmelCase : int = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowercase_ ),
"end_token": [-100] * len(lowercase_ ),
"category": category,
},
}
__UpperCAmelCase : List[Any] = out['''context'''].split()
__UpperCAmelCase : Optional[Any] = splitted_context[answer['''end_token''']]
__UpperCAmelCase : Dict = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=lowercase_ , ).input_ids )
__UpperCAmelCase : Optional[Any] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=lowercase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__UpperCAmelCase : Dict = len(tokenizer(lowercase_ , add_special_tokens=lowercase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__UpperCAmelCase : List[str] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
__UpperCAmelCase : str = answer['''start_token''']
__UpperCAmelCase : Optional[int] = answer['''end_token''']
if assertion:
__UpperCAmelCase : Optional[int] = tokenizer.decode(lowercase_ )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , lowercase_ , end='''\n\n''' )
if len(lowercase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__UpperCAmelCase : Optional[Any] = input_ids[:q_len]
__UpperCAmelCase : Tuple = range(lowercase_ , len(lowercase_ ) , max_length - doc_stride )
__UpperCAmelCase : str = []
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Any = [] # null, yes, no, long, short
for i in doc_start_indices:
__UpperCAmelCase : Union[str, Any] = i + max_length - q_len
__UpperCAmelCase : Optional[int] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__UpperCAmelCase : int = start_token - i + q_len
__UpperCAmelCase : Any = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
__UpperCAmelCase : Any = -100
__UpperCAmelCase : List[Any] = -100
answers_category.append('''null''' )
__UpperCAmelCase : Dict = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowercase_ )
answers_end_token.append(lowercase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(lowercase_ ) )
print('''Old:''' , tokenizer.decode(lowercase_ ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
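# Self-contained sketch of the strided-window chunking used above
# (hypothetical toy numbers; the real code prepends the question tokens to
# every window). Consecutive windows start max_length - doc_stride tokens
# apart, so neighbouring windows share doc_stride - q_len tokens.
def _strided_windows_sketch(ids, q_len, max_length, doc_stride):
    windows = []
    for start in range(q_len, len(ids), max_length - doc_stride):
        # Each window leaves q_len slots for the question tokens.
        windows.append(ids[start : start + max_length - q_len])
    return windows

# q_len=2, max_length=8, doc_stride=4 -> step 4; adjacent windows share 2 ids.
assert _strided_windows_sketch(list(range(14)), 2, 8, 4) == [
    [2, 3, 4, 5, 6, 7],
    [6, 7, 8, 9, 10, 11],
    [10, 11, 12, 13],
]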
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=2048 , lowercase_=4096 , lowercase_=False ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[str] = get_strided_contexts_and_ans(
lowercase_ , lowercase_ , doc_stride=lowercase_ , max_length=lowercase_ , assertion=lowercase_ , )
return example
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
with jsonlines.open(lowercase_ , '''a''' ) as writer:
for example in tqdm(lowercase_ , total=len(lowercase_ ) , desc='''Saving samples ... ''' ):
__UpperCAmelCase : Union[str, Any] = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # randomly drop ~60% of null-category samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
lowerCAmelCase = load_dataset("""natural_questions""")
lowerCAmelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
lowerCAmelCase = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
lowerCAmelCase = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
lowerCAmelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
lowerCAmelCase = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
lowerCAmelCase = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
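# Modern, self-contained NumPy sketch of the same E/M loop (an illustrative
# rewrite, not the TF graph above): assign each vector to its nearest
# centroid, then move every non-empty centroid to the mean of its members.
import numpy as np

def _kmeans_sketch(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    data = np.asarray(vectors, dtype=float)
    centroids = data[rng.choice(len(data), size=k, replace=False)].copy()
    for _ in range(iterations):
        # Expectation step: Euclidean distance to every centroid, then argmin.
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization step: recompute the mean of each non-empty cluster.
        for cluster in range(k):
            members = data[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments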
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
return int(input_a == input_a == 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
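# Worked example (illustrative): for [3, 2, 7, 10] the running pair
# (max_including, max_excluding) evolves (3, 0) -> (2, 3) -> (10, 3) ->
# (13, 10), so the best non-adjacent sum is max(13, 10) = 13, i.e. 3 + 10.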
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
lowerCAmelCase = CLIPImageProcessor()
lowerCAmelCase = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
lowerCAmelCase = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
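# Hedged invocation sketch (hypothetical script name and output path; the
# flag names come from the argparse definition above):
#
#     python convert_unclip_txt2img_to_image_variation.py \
#         --dump_path ./unclip-image-variation \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha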
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio clip of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
    def test_large_model_tf(self):
pass
| 675 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    '''Mark tests as "unit" by default when they are not marked "integration" (or already "unit").'''
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    '''Register the custom markers used by the test suite.'''
    config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory, monkeypatch):
    '''Redirect all datasets caches to a temporary directory for the tests.'''
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output():
    '''Disable progress bars for the whole test session.'''
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch):
    '''Do not report download counts from tests to the Hub.'''
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    '''Silence SQLAlchemy 2.0 deprecation warnings where requested.'''
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
    def __init__( self , transformer: Transformer2DModel , vae: AutoencoderKL , scheduler: KarrasDiffusionSchedulers , id2label: Optional[Dict[int, str]] = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler)
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(''','''):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))
    def get_label_ids( self , label):
        if not isinstance(label , list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size , device=self.device)
        class_labels_input = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half] , dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps , float):
                    dtype = torch.float32 if is_mps else torch.float64  # mps does not support float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64  # mps does not support int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps) // 2 , dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0)
                noise_pred = torch.cat([eps, rest] , dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
| 675 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__( self):
        self.elements = []
        self.set = set()
    def minkey( self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''')
    def empty( self):
        return len(self.elements) == 0
    def put( self , item , priority):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements , (pro, xxx))
    def remove_element( self , item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements , (prito, yyy))
    def top_show( self):
        return self.elements[0][1]
    def get( self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
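# Added illustration (not in the original file): put() keeps one entry per item
# and re-prioritises it in place, e.g.
#   pq = PriorityQueue(); pq.put((0, 0), 5); pq.put((0, 0), 2)
#   pq.minkey()  # -> 2, the single entry was updated rather than duplicated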
def consistent_heuristic(p: TPos, goal: TPos):
    '''Euclidean distance between ``p`` and ``goal``.'''
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_2(p: TPos, goal: TPos):
    '''Integer division of the consistent heuristic by the (growing) time counter ``t``.'''
    return consistent_heuristic(p , goal ) // t
def heuristic_1(p: TPos, goal: TPos):
    '''Manhattan distance between ``p`` and ``goal``.'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
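# Added example (not in the original file): for p = (0, 0) and goal = (19, 19),
# heuristic_1 gives |0-19| + |0-19| = 38, while consistent_heuristic gives the
# Euclidean distance sqrt(19**2 + 19**2) ~= 26.87.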
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    '''Priority key for queue ``i``: g(start) + W1 * h_i(start, goal).'''
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
def do_something(back_pointer, goal, start):
    '''Print the grid, the path found, and exit.'''
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''
    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p: TPos) -> bool:
    '''Return True if ``p`` lies inside the n x n grid.'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    '''Expand node ``s`` for queue ``j``, relaxing its four grid neighbours.'''
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= W2 * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground() -> list[TPos]:
    '''Build the list of blocked grid cells.'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    '''Run multi-heuristic A* from ``start`` to ``goal``.'''
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowercase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''clusters'''))
        self.assertTrue(hasattr(image_processing , '''do_resize'''))
        self.assertTrue(hasattr(image_processing , '''size'''))
        self.assertTrue(hasattr(image_processing , '''do_normalize'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key]))
            else:
                self.assertEqual(obj[key] , value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''image_processor.json''')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key] , value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key] , value)
@unittest.skip('''ImageGPT requires clusters at initialization''')
    def test_init_without_params(self):
pass
def prepare_images():
    '''Load two test images from the fixtures dataset.'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image1 = Image.open(dataset[4]['''file'''] )
    image2 = Image.open(dataset[5]['''file'''] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids , torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape , (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids)
        # test batched
        encoding = image_processing(images , return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids , torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape , (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids)
| 675 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCAmelCase = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname, overwrite = False):
    '''Sort all the mappings of one auto module alphabetically by identifier.'''
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
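# Added illustration (not in the original script): _re_identifier extracts the
# sort key from a mapping entry, e.g. for the line '        ("albert", "AlbertConfig"),'
# it captures "albert", which is what the blocks above are sorted by.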
def sort_all_auto_mappings(overwrite = False):
    '''Sort the mappings of every auto module.'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix"
            ''' this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    '''Yield (job title, company name) pairs scraped from Indeed for the given location.'''
    soup = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    '''Return the inverse of a 2x2 or 3x3 matrix, using Decimal for precision.'''
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][1] ))
        cofactor_matrix[0][1] = -((d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )))
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (d(matrix[1][1] ) * d(matrix[2][0] ))
        cofactor_matrix[1][0] = -((d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )))
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][0] ))
        cofactor_matrix[1][2] = -((d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )))
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][1] ))
        cofactor_matrix[2][1] = -((d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )))
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (d(matrix[0][1] ) * d(matrix[1][0] ))
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
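# Added usage example (not in the original file): for the 2x2 case,
#   inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]) == [[3.0, -5.0], [-1.0, 2.0]]
# since the determinant is 2 * 3 - 1 * 5 = 1.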
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    '''Map a pixel resolution to the matching latent resolution, rounding up to the nearest latent-grid multiple.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
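# Added sanity check (not in the original file): with the default scale_factor=8,
# a 512x512 image maps to a 64x64 latent grid, and non-multiples round up, e.g.
#   downscale_height_and_width(520, 512) == (72, 64)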
class lowerCamelCase ( _UpperCamelCase ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0):
        '''Offload all models to CPU via accelerate to reduce memory usage.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)
    def enable_model_cpu_offload( self , gpu_id=0):
        '''Offload all models to CPU, moving each one to GPU only while its forward pass runs.'''
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True)
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self , image_embeds , negative_image_embeds , hint , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list):
            image_embeds = torch.cat(image_embeds , dim=0)
        if isinstance(negative_image_embeds , list):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0)
        if isinstance(hint , list):
            hint = torch.cat(hint , dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)
            hint = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=device)
        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1)
            image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            if output_type == "pil":
                image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 675 | 1 |
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None)
| 675 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = '''gpt_neo'''
_lowerCAmelCase : Tuple = ['''past_key_values''']
_lowerCAmelCase : str = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowercase__=5_0_2_5_7 , lowercase__=2_0_4_8 , lowercase__=2_0_4_8 , lowercase__=2_4 , lowercase__=[[["global", "local"], 1_2]] , lowercase__=1_6 , lowercase__=None , lowercase__=2_5_6 , lowercase__="gelu_new" , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=1e-5 , lowercase__=0.0_2 , lowercase__=True , lowercase__=5_0_2_5_6 , lowercase__=5_0_2_5_6 , **lowercase__ , ):
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Dict = num_layers
__UpperCAmelCase : Union[str, Any] = num_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Optional[Any] = window_size
__UpperCAmelCase : List[str] = activation_function
__UpperCAmelCase : Union[str, Any] = resid_dropout
__UpperCAmelCase : Any = embed_dropout
__UpperCAmelCase : Optional[int] = attention_dropout
__UpperCAmelCase : List[str] = classifier_dropout
__UpperCAmelCase : Optional[int] = layer_norm_epsilon
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Tuple = bos_token_id
__UpperCAmelCase : Optional[Any] = eos_token_id
__UpperCAmelCase : List[Any] = attention_types
__UpperCAmelCase : str = self.expand_attention_types_params(lowercase__)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
F"`config.num_layers = {self.num_layers}`. "
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''')
super().__init__(bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
@staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def custom_unfold(input, dimension, size, step):
    '''Custom torch.Tensor.unfold implementation, needed for the ONNX export.'''
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    '''Custom implementation of GPT-Neo's block-length computation, needed for the ONNX export.'''
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
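# Added example (not in the original file): for seq_length=512 and window_size=256,
# the candidates are 1..255, the largest divisor of 512 among them is 128, so the
# function returns (tensor(128), tensor(4)) -- four blocks of length 128.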
class lowerCamelCase ( _UpperCamelCase ):
@property
    def inputs(self):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''')
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_attention_heads(self):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1)
return ordered_inputs
@property
    def default_onnx_opset(self):
        return 13
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul , self.conv_stride , 1)
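    # Added note (not in the original config): with the default conv_stride above,
    # inputs_to_logits_ratio evaluates to 5 * 2**6 = 320, i.e. one output frame
    # per 320 input samples (20 ms of audio at 16 kHz).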
| 675 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( _UpperCamelCase ):
_lowerCAmelCase : int = '''M-CLIP'''
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = MCLIPConfig
    def __init__( self , config , *args , **kwargs):
        super().__init__(config , *args , **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims)
    def forward( self , txt , att):
        embs = self.transformer(input_ids=txt , attention_mask=att)[0]
        embs = (embs * att.unsqueeze(2)).sum(dim=1) / att.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
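    # Added note (not in the original file): forward() mean-pools the token
    # embeddings under the attention mask before the projection, so a batch of
    # shape (B, T, transformerDimensions) yields embeddings of shape (B, numDims).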
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''Build a MobileNetV1Config from the checkpoint name.'''
    config = MobileNetV1Config(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError('''Quantized models are not supported.''' )
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = '''background'''
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
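# Added example (not in the original script): get_mobilenet_v1_config("mobilenet_v1_0.75_192")
# yields depth_multiplier=0.75, image_size=192 and num_labels=1001.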
def prepare_img():
    '''Load an image of cute cats, used to verify the conversion results.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''Copy/paste/tweak the original checkpoint's weights into our MobileNetV1 structure.'''
    config = get_mobilenet_v1_config(model_name )
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        repo_id = '''google/''' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
        help="""Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 675 | 1 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = defaultdict(lowercase_ )
__UpperCAmelCase : Any = []
__UpperCAmelCase : Tuple = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(lowercase_ )
__UpperCAmelCase : List[Any] = new_doc_list
__UpperCAmelCase : Dict = [key for key, value in counts.items() if value > 1]
__UpperCAmelCase : Union[str, Any] = []
for duplicate_key in duplicates:
__UpperCAmelCase : List[str] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(lowercase_ ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__UpperCAmelCase : int = sorted(lowercase_ , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase_ ) > 1:
raise ValueError(f"{lowercase_} has two 'overview' docs, which is not allowed." )
overview_doc.extend(lowercase_ )
# The overview entry stays first, followed by the alphabetically sorted docs
return overview_doc
def __SCREAMING_SNAKE_CASE ( lowercase_=False ) -> List[Any]:
'''simple docstring'''
with open(lowercase_ , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Optional[int] = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase : List[Any] = content[api_idx]['''sections''']
# Then to the scheduler doc
__UpperCAmelCase : Any = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__UpperCAmelCase : Union[str, Any] = api_doc[scheduler_idx]['''sections''']
__UpperCAmelCase : Optional[int] = clean_doc_toc(lowercase_ )
__UpperCAmelCase : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
__UpperCAmelCase : Tuple = True
if overwrite:
__UpperCAmelCase : Union[str, Any] = new_scheduler_doc
if diff:
if overwrite:
__UpperCAmelCase : Dict = api_doc
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
'''The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
def __SCREAMING_SNAKE_CASE ( lowercase_=False ) -> List[Any]:
'''simple docstring'''
with open(lowercase_ , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase : Optional[int] = content[api_idx]['''sections''']
# Then to the pipeline doc
__UpperCAmelCase : Any = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__UpperCAmelCase : Any = False
__UpperCAmelCase : Union[str, Any] = api_doc[pipeline_idx]['''sections''']
__UpperCAmelCase : List[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__UpperCAmelCase : Dict = pipeline_doc['''section''']
__UpperCAmelCase : Optional[Any] = clean_doc_toc(lowercase_ )
if overwrite:
__UpperCAmelCase : int = new_sub_pipeline_doc
new_pipeline_docs.append(lowercase_ )
# sort overall pipeline doc
__UpperCAmelCase : List[str] = clean_doc_toc(lowercase_ )
if new_pipeline_docs != pipeline_docs:
__UpperCAmelCase : Tuple = True
if overwrite:
__UpperCAmelCase : List[Any] = new_pipeline_docs
if diff:
if overwrite:
__UpperCAmelCase : Union[str, Any] = api_doc
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
'''The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
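# A minimal sketch of what the toc-cleaning helper does, using the name
# clean_doc_toc from its call sites above (the def itself is obfuscated);
# the entries are made-up examples, not taken from the real _toctree.yml.
#
# docs = [
#     {"local": "pipe_b", "title": "B"},
#     {"local": "pipe_overview", "title": "Overview"},
#     {"local": "pipe_a", "title": "A"},
# ]
# clean_doc_toc(docs)
# # -> the "Overview" entry first, then the rest sorted by title: Overview, A, B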
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict[str, str]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__UpperCAmelCase : Optional[Any] = remove_duplicates(key.upper() )
__UpperCAmelCase : Union[str, Any] = len(lowercase_ )
# First fill cipher with key characters
__UpperCAmelCase : Optional[Any] = {alphabet[i]: char for i, char in enumerate(lowercase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowercase_ ) , 26 ):
__UpperCAmelCase : str = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__UpperCAmelCase : str = alphabet[i - offset]
__UpperCAmelCase : Tuple = char
return cipher_alphabet
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(lowercase_ , lowercase_ ) for ch in message.upper() )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowercase_ , lowercase_ ) for ch in message.upper() )
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : List[Any] = input('''Enter message to encode or decode: ''' ).strip()
__UpperCAmelCase : int = input('''Enter keyword: ''' ).strip()
__UpperCAmelCase : Tuple = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
__UpperCAmelCase : Dict = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
__UpperCAmelCase : List[str] = create_cipher_map(lowercase_ )
print(func(lowercase_ , lowercase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
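# A hedged programmatic sketch using the names called in main() above (the defs
# themselves are obfuscated). For typical alphabetic keys the generated cipher
# alphabet is a bijection on A-Z, so decipher inverts encipher:
#
# cipher_map = create_cipher_map("SECRET")
# hidden = encipher("HELLO WORLD", cipher_map)
# assert decipher(hidden, cipher_map) == "HELLO WORLD"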
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
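# A quick hedged demo, assuming the helper above were named binary_or (the def
# is obfuscated). 5 is 0b101 and 3 is 0b011, so OR-ing each column gives 0b111:
#
# binary_or(5, 3) # -> "0b111"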
| 675 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : torch.FloatTensor
_lowerCAmelCase : torch.FloatTensor
_lowerCAmelCase : Optional[torch.FloatTensor] = None
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
_lowerCAmelCase : Tuple = 2
@register_to_config
def __init__( self , lowercase__ = 0.0_2 , lowercase__ = 1_0_0 , lowercase__ = 1.0_0_7 , lowercase__ = 8_0 , lowercase__ = 0.0_5 , lowercase__ = 5_0 , ):
# standard deviation of the initial noise distribution
__UpperCAmelCase : str = sigma_max
# setable values
__UpperCAmelCase : int = None
__UpperCAmelCase : np.IntTensor = None
__UpperCAmelCase : torch.FloatTensor = None # sigma(t_i)
def A( self , lowercase__ , lowercase__ = None):
return sample
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : str = num_inference_steps
__UpperCAmelCase : Optional[int] = np.arange(0 , self.num_inference_steps)[::-1].copy()
__UpperCAmelCase : List[Any] = torch.from_numpy(lowercase__).to(lowercase__)
__UpperCAmelCase : List[Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__UpperCAmelCase : Any = torch.tensor(lowercase__ , dtype=torch.floataa , device=lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ = None):
if self.config.s_min <= sigma <= self.config.s_max:
__UpperCAmelCase : Optional[Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
else:
__UpperCAmelCase : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
__UpperCAmelCase : Any = self.config.s_noise * randn_tensor(sample.shape , generator=lowercase__).to(sample.device)
__UpperCAmelCase : str = sigma + gamma * sigma
__UpperCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = True , ):
__UpperCAmelCase : Dict = sample_hat + sigma_hat * model_output
__UpperCAmelCase : List[str] = (sample_hat - pred_original_sample) / sigma_hat
__UpperCAmelCase : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase__ , derivative=lowercase__ , pred_original_sample=lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = True , ):
__UpperCAmelCase : Optional[int] = sample_prev + sigma_prev * model_output
__UpperCAmelCase : Optional[Any] = (sample_prev - pred_original_sample) / sigma_prev
__UpperCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase__ , derivative=lowercase__ , pred_original_sample=lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__):
raise NotImplementedError()
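# A brief note on the math above (not part of the original file): the step
# method is the first-order Euler update sample_prev = sample_hat +
# (sigma_prev - sigma_hat) * d with d = (sample_hat - denoised) / sigma_hat,
# and step_correct is the Heun-style correction that re-evaluates the
# derivative at the Euler estimate and averages, 0.5 * (d + d_corr). This
# matches Algorithm 2 of Karras et al. (2022), which the churn/noise
# parameters also come from.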
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
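# A hedged worked example for the demo in main() above (names as used there;
# the expected strings were computed by hand from the letter tables, so treat
# them as illustrative):
#
# generate_key("THE GERMAN ATTACK", "SECRET") # -> "SECRETSECRETSECRE"
# cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE") # -> "BDC PAYUWL JPAIYI"
# original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE") # -> "THE GERMAN ATTACK"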
| 675 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCAmelCase = logging.getLogger(__name__)
class lowerCamelCase ( _UpperCamelCase ):
def A( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None):
__UpperCAmelCase : Optional[int] = self.layer[current_layer](lowercase__ , lowercase__ , head_mask[current_layer])
__UpperCAmelCase : List[str] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , _UpperCamelCase , )
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
super().__init__(lowercase__)
__UpperCAmelCase : Dict = BertEncoderWithPabee(lowercase__)
self.init_weights()
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : str = 0
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[str] = 0
def A( self , lowercase__):
__UpperCAmelCase : str = threshold
def A( self , lowercase__):
__UpperCAmelCase : str = patience
def A( self):
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Optional[int] = 0
def A( self):
__UpperCAmelCase : List[Any] = self.inference_layers_num / self.inference_instances_num
__UpperCAmelCase : Optional[int] = (
F"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
F" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(lowercase__)
@add_start_docstrings_to_model_forward(lowercase__)
def A( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
__UpperCAmelCase : str = input_ids.size()
elif inputs_embeds is not None:
__UpperCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
__UpperCAmelCase : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCAmelCase : Any = torch.ones(lowercase__ , device=lowercase__)
if token_type_ids is None:
__UpperCAmelCase : List[Any] = torch.zeros(lowercase__ , dtype=torch.long , device=lowercase__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(lowercase__ , lowercase__ , lowercase__)
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = encoder_hidden_states.size()
__UpperCAmelCase : Tuple = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__UpperCAmelCase : str = torch.ones(lowercase__ , device=lowercase__)
__UpperCAmelCase : int = self.invert_attention_mask(lowercase__)
else:
__UpperCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCAmelCase : Any = self.get_head_mask(lowercase__ , self.config.num_hidden_layers)
__UpperCAmelCase : Union[str, Any] = self.embeddings(
input_ids=lowercase__ , position_ids=lowercase__ , token_type_ids=lowercase__ , inputs_embeds=lowercase__)
__UpperCAmelCase : Any = embedding_output
if self.training:
__UpperCAmelCase : Any = []
for i in range(self.config.num_hidden_layers):
__UpperCAmelCase : int = self.encoder.adaptive_forward(
lowercase__ , current_layer=lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__)
__UpperCAmelCase : Optional[int] = self.pooler(lowercase__)
__UpperCAmelCase : List[Any] = output_layers[i](output_dropout(lowercase__))
res.append(lowercase__)
elif self.patience == 0: # Use all layers for inference
__UpperCAmelCase : Optional[int] = self.encoder(
lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
__UpperCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0])
__UpperCAmelCase : Optional[Any] = [output_layers[self.config.num_hidden_layers - 1](lowercase__)]
else:
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Any = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
__UpperCAmelCase : Optional[int] = self.encoder.adaptive_forward(
lowercase__ , current_layer=lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__)
__UpperCAmelCase : Tuple = self.pooler(lowercase__)
__UpperCAmelCase : List[Any] = output_layers[i](lowercase__)
if regression:
__UpperCAmelCase : Optional[int] = logits.detach()
if patient_result is not None:
__UpperCAmelCase : Tuple = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = logits.detach().argmax(dim=1)
if patient_result is not None:
__UpperCAmelCase : Optional[int] = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowercase__)):
patient_counter += 1
else:
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : str = logits
if patient_counter == self.patience:
break
__UpperCAmelCase : Optional[int] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , _UpperCamelCase , )
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
super().__init__(lowercase__)
__UpperCAmelCase : Optional[int] = config.num_labels
__UpperCAmelCase : Any = BertModelWithPabee(lowercase__)
__UpperCAmelCase : List[str] = nn.Dropout(config.hidden_dropout_prob)
__UpperCAmelCase : Dict = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase__)
def A( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
__UpperCAmelCase : str = self.bert(
input_ids=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , position_ids=lowercase__ , head_mask=lowercase__ , inputs_embeds=lowercase__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__UpperCAmelCase : Dict = (logits[-1],)
if labels is not None:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = 0
for ix, logits_item in enumerate(lowercase__):
if self.num_labels == 1:
# We are doing regression
__UpperCAmelCase : str = MSELoss()
__UpperCAmelCase : Optional[Any] = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
__UpperCAmelCase : Dict = CrossEntropyLoss()
__UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
__UpperCAmelCase : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__UpperCAmelCase : Dict = (total_loss / total_weights,) + outputs
return outputs
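# A brief note on the training loss above (not part of the original file): each
# internal classifier i contributes its loss weighted by (i + 1), so the model
# optimizes sum_i (i + 1) * loss_i / sum_i (i + 1) and deeper exits dominate.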
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
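# A minimal numeric sanity check of the IoU definition in the description
# above, using plain NumPy; the two toy masks are made up for illustration
# and are not model outputs.
import numpy as np

toy_pred = np.array([[0, 1], [1, 1]])
toy_gt = np.array([[0, 0], [1, 1]])

# IoU for class 1: the intersection has 2 pixels, the union has 3 -> 2/3
toy_intersect = np.logical_and(toy_pred == 1, toy_gt == 1).sum()
toy_union = np.logical_or(toy_pred == 1, toy_gt == 1).sum()
print(toy_intersect / toy_union)  # 0.666...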
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
__UpperCAmelCase : Optional[int] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
__UpperCAmelCase : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(lowercase_ , 1 ):
if n < _p:
# then we have our last prime to check
__UpperCAmelCase : Optional[Any] = primes[:idx]
break
__UpperCAmelCase , __UpperCAmelCase : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
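# e.g. for n = 221: n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2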
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__UpperCAmelCase : Union[str, Any] = False
for r in range(lowercase_ ):
__UpperCAmelCase : Dict = pow(lowercase_ , d * 2**r , lowercase_ )
# see the article for an explanation of this check on m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__UpperCAmelCase : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to True,
# and n MUST be composite
return False
return True
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
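# A short hedged note on the update above: the window hash is the base-256
# polynomial hash sum(ord(t[i + k]) * 256**(m - 1 - k)) mod p over a window of
# length m, so sliding by one removes the leading term ord(t[i]) * modulus_power
# and appends ord(t[i + m]); modulus_power holds 256**(m - 1) mod p.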
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowerCamelCase :
_lowerCAmelCase : str = field(
metadata={'''help''': '''The output directory where the model will be written.'''} , )
_lowerCAmelCase : str = field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} , )
_lowerCAmelCase : str = field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} , )
_lowerCAmelCase : Optional[str] = field(
default=_UpperCamelCase , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
_lowerCAmelCase : Optional[str] = field(
default=_UpperCamelCase , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments,) )
((__UpperCAmelCase) , ) : Optional[Any] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
__UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
__UpperCAmelCase : str = True
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : str = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowercase_ , decoder_config=lowercase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
__UpperCAmelCase : Tuple = decoder_config.decoder_start_token_id
__UpperCAmelCase : str = decoder_config.pad_token_id
if decoder_start_token_id is None:
__UpperCAmelCase : Optional[Any] = decoder_config.bos_token_id
if pad_token_id is None:
__UpperCAmelCase : Any = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
__UpperCAmelCase : Any = decoder_config.eos_token_id
__UpperCAmelCase : Dict = decoder_start_token_id
__UpperCAmelCase : Optional[int] = pad_token_id
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
__UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
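# A hedged invocation sketch: the flag names follow the attributes read in
# main() (the dataclass field names above are obfuscated), and the script
# filename and model ids are illustrative assumptions.
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2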
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
__UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : int = 2
__UpperCAmelCase : int = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase_ )
if n > 1:
factors.append(lowercase_ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
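# A quick hedged check, assuming the def above keeps its upstream name
# prime_factors: 100 = 2 * 2 * 5 * 5, and factors come out in ascending order.
#
# prime_factors(100) # -> [2, 2, 5, 5]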
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( ) -> Iterator[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 2
while True:
if is_prime(lowercase_ ):
yield num
num += 1
def __SCREAMING_SNAKE_CASE ( lowercase_ = 2000000 ) -> int:
'''simple docstring'''
return sum(takewhile(lambda lowercase_ : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'{solution() = }')
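# For reference: with the default limit of 2_000_000 this is Project Euler
# problem 10, whose well-known answer is 142913828922 (stated as a cross-check,
# not computed from this file).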
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : Tuple = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__UpperCAmelCase : Dict = dict(zip(lowercase__ , range(len(lowercase__))))
__UpperCAmelCase : Dict = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__UpperCAmelCase : Tuple = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowercase__) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowercase__))
__UpperCAmelCase : List[str] = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , lowercase__)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase__ , lowercase__)
def A( self , **lowercase__):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase__)
def A( self , **lowercase__):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase__)
def A( self , **lowercase__):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase__)
def A( self):
shutil.rmtree(self.tmpdirname)
def A( self):
__UpperCAmelCase : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(lowercase__ , 0 , -1)) for x in image_inputs]
return image_inputs
def A( self):
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__)
__UpperCAmelCase : List[str] = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase__)
self.assertIsInstance(processor_fast.tokenizer , lowercase__)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase__)
self.assertIsInstance(processor_fast.image_processor , lowercase__)
def A( self):
__UpperCAmelCase : Any = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__UpperCAmelCase : Tuple = self.get_image_processor(do_normalize=lowercase__)
__UpperCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase__)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase__)
def A( self):
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : int = image_processor(lowercase__ , return_tensors='''np''')
__UpperCAmelCase : Tuple = processor(images=lowercase__ , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Any = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
__UpperCAmelCase : List[str] = '''lower newer'''
__UpperCAmelCase : Dict = processor(text=lowercase__ , return_tensors='''np''')
__UpperCAmelCase : Optional[int] = tokenizer(lowercase__ , return_tensors='''np''')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def A( self):
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Dict = self.get_tokenizer()
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
__UpperCAmelCase : Union[str, Any] = '''lower newer'''
__UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(text=lowercase__ , images=lowercase__)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
def A( self):
__UpperCAmelCase : Tuple = '''google/owlvit-base-patch32'''
__UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(lowercase__)
__UpperCAmelCase : Dict = ['''cat''', '''nasa badge''']
__UpperCAmelCase : List[Any] = processor(text=lowercase__)
__UpperCAmelCase : Any = 1_6
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
def A( self):
__UpperCAmelCase : List[str] = '''google/owlvit-base-patch32'''
__UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(lowercase__)
__UpperCAmelCase : Optional[int] = [['''cat''', '''nasa badge'''], ['''person''']]
__UpperCAmelCase : Any = processor(text=lowercase__)
__UpperCAmelCase : int = 1_6
__UpperCAmelCase : int = len(lowercase__)
__UpperCAmelCase : int = max([len(lowercase__) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
def A( self):
__UpperCAmelCase : List[Any] = '''google/owlvit-base-patch32'''
__UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(lowercase__)
__UpperCAmelCase : Optional[int] = ['''cat''', '''nasa badge''']
__UpperCAmelCase : List[Any] = processor(text=lowercase__)
__UpperCAmelCase : Optional[int] = 1_6
__UpperCAmelCase : Optional[Any] = inputs['''input_ids''']
__UpperCAmelCase : str = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def A( self):
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Dict = self.prepare_image_inputs()
__UpperCAmelCase : int = processor(images=lowercase__ , query_images=lowercase__)
self.assertListEqual(list(inputs.keys()) , ['''query_pixel_values''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase__):
processor()
def A( self):
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=lowercase__ , image_processor=lowercase__)
__UpperCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : int = processor.batch_decode(lowercase__)
__UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(row , reverse=True ) for row in lowercase_ )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# The element must be negative and the one before it non-negative, i.e. the first negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
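# A tiny hedged walk-through of find_negative_index (name as called in
# count_negatives_binary_search above) on one descending row:
#
# find_negative_index([4, 3, 2, -1]) # -> 3, the count of non-negative values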
| 675 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name) -> MobileNetVaConfig:
    """Build a MobileNetV1 config (depth multiplier, image size, labels) from the model name."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def prepare_img() -> Image.Image:
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy, load, and verify a TensorFlow MobileNetV1 checkpoint in the transformers design."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
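# Example invocation (illustrative; the checkpoint and output paths below are
# assumptions, not part of the original script):
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf \
#       --push_to_hub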
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
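# Usage sketch (the exact import path is an assumption based on the usual
# transformers layout): with the lazy module installed in sys.modules,
#   from transformers.models.tapex import TapexTokenizer
# only triggers the real submodule import on first attribute access.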
| 675 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Return a decorator that runs ``func`` eagerly or as an (optionally XLA-compiled) tf.function."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a random integer tensor of shape (batch_size, sequence_length) for benchmarking."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.intaa)
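# Usage sketch for the two helpers above (hedged; not part of the original module):
#   wrapped = run_with_tf_optimizations(do_eager_mode=False, use_xla=True)(my_forward)
# compiles `my_forward` as an XLA tf.function, while passing do_eager_mode=True
# with use_xla=False returns a plain eager wrapper. A matching fake batch for
# either variant could be built with `random_input_ids(8, 128, 30522)`.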
class lowerCamelCase ( _UpperCamelCase ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
def A( self , lowercase__ , lowercase__ , lowercase__):
# initialize GPU on separate process
__UpperCAmelCase : Dict = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
__UpperCAmelCase : int = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__)
return self._measure_speed(_inference)
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
__UpperCAmelCase : List[Any] = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__)
return self._measure_speed(_train)
def A( self , lowercase__ , lowercase__ , lowercase__):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__)
__UpperCAmelCase : Tuple = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
__UpperCAmelCase : List[str] = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__)
return self._measure_memory(_inference)
def A( self , lowercase__ , lowercase__ , lowercase__):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__)
__UpperCAmelCase : str = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
__UpperCAmelCase : Optional[Any] = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__)
return self._measure_memory(_train)
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Any = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
__UpperCAmelCase : Tuple = (
hasattr(lowercase__ , '''architectures''')
and isinstance(config.architectures , lowercase__)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__UpperCAmelCase : Optional[int] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__UpperCAmelCase : List[str] = __import__('''transformers''' , fromlist=[model_class])
__UpperCAmelCase : Union[str, Any] = getattr(lowercase__ , lowercase__)
__UpperCAmelCase : Any = model_cls(lowercase__)
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
__UpperCAmelCase : int = TF_MODEL_MAPPING[config.__class__](lowercase__)
# encoder-decoder has vocab size saved differently
__UpperCAmelCase : Dict = config.vocab_size if hasattr(lowercase__ , '''vocab_size''') else config.encoder.vocab_size
__UpperCAmelCase : Optional[int] = random_input_ids(lowercase__ , lowercase__ , lowercase__)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(lowercase__ , decoder_input_ids=lowercase__ , training=lowercase__)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(lowercase__ , training=lowercase__)
__UpperCAmelCase : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
__UpperCAmelCase : Tuple = (
hasattr(lowercase__ , '''architectures''')
and isinstance(config.architectures , lowercase__)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__UpperCAmelCase : List[Any] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__UpperCAmelCase : Any = __import__('''transformers''' , fromlist=[model_class])
__UpperCAmelCase : Dict = getattr(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = model_cls(lowercase__)
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
__UpperCAmelCase : int = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase__)
# encoder-decoder has vocab size saved differently
__UpperCAmelCase : Dict = config.vocab_size if hasattr(lowercase__ , '''vocab_size''') else config.encoder.vocab_size
__UpperCAmelCase : Any = random_input_ids(lowercase__ , lowercase__ , lowercase__)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
__UpperCAmelCase : Any = model(lowercase__ , decoder_input_ids=lowercase__ , labels=lowercase__ , training=lowercase__)[0]
__UpperCAmelCase : str = tf.gradients(lowercase__ , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
__UpperCAmelCase : str = model(lowercase__ , labels=lowercase__ , training=lowercase__)[0]
__UpperCAmelCase : int = tf.gradients(lowercase__ , model.trainable_variables)
return gradients
__UpperCAmelCase : Dict = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A( self , lowercase__):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
timeit.repeat(lowercase__ , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__UpperCAmelCase : Optional[int] = timeit.repeat(
lowercase__ , repeat=self.args.repeat , number=1_0 , )
return min(lowercase__) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}")
def A( self , lowercase__):
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
__UpperCAmelCase : Optional[Any] = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
__UpperCAmelCase : Any = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
__UpperCAmelCase : Optional[int] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
__UpperCAmelCase : int = nvml.nvmlDeviceGetMemoryInfo(lowercase__)
__UpperCAmelCase : Dict = meminfo.used
__UpperCAmelCase : Optional[Any] = Memory(lowercase__)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
__UpperCAmelCase : Any = None
else:
__UpperCAmelCase : int = measure_peak_memory_cpu(lowercase__)
__UpperCAmelCase : List[str] = Memory(lowercase__) if isinstance(lowercase__ , lowercase__) else memory_bytes
if self.args.trace_memory_line_by_line:
__UpperCAmelCase : Any = stop_memory_tracing(lowercase__)
if memory is None:
__UpperCAmelCase : Tuple = summary.total
else:
__UpperCAmelCase : List[str] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}")
return "N/A", None
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def A( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
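# To exercise this suite in isolation (illustrative command; the file path is
# an assumption based on the usual transformers test layout):
#   python -m pytest tests/models/biogpt/test_modeling_biogpt.py -v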
| 675 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def A( self):
__UpperCAmelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : List[Any] = self.get_dummy_inputs()
__UpperCAmelCase : List[Any] = pipe(**lowercase__).images
__UpperCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def A( self):
__UpperCAmelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**lowercase__).images
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : str = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def A( self):
__UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
# warmup pass to apply optimizations
__UpperCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs())
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : Tuple = pipe(**lowercase__).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Optional[Any] = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def A( self):
__UpperCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
__UpperCAmelCase : Tuple = pipe(**lowercase__).images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Tuple = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def A( self):
__UpperCAmelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**lowercase__).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Any = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def A( self):
__UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
__UpperCAmelCase : Union[str, Any] = pipe(**lowercase__).images
__UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : str = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def A( self):
__UpperCAmelCase : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
__UpperCAmelCase : Optional[Any] = init_image.resize((7_6_8, 5_1_2))
# using the PNDM scheduler by default
__UpperCAmelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Dict = '''A fantasy landscape, trending on artstation'''
__UpperCAmelCase : List[Any] = np.random.RandomState(0)
__UpperCAmelCase : List[Any] = pipe(
prompt=lowercase__ , image=lowercase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase__ , output_type='''np''' , )
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : List[str] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__UpperCAmelCase : Optional[Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def A( self):
__UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
__UpperCAmelCase : Tuple = init_image.resize((7_6_8, 5_1_2))
__UpperCAmelCase : Any = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
__UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : List[Any] = '''A fantasy landscape, trending on artstation'''
__UpperCAmelCase : Optional[int] = np.random.RandomState(0)
__UpperCAmelCase : str = pipe(
prompt=lowercase__ , image=lowercase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowercase__ , output_type='''np''' , )
__UpperCAmelCase : List[Any] = output.images
__UpperCAmelCase : Tuple = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__UpperCAmelCase : Tuple = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
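# The @nightly suite above is skipped in normal runs; in diffusers-style test
# harnesses it is typically enabled via an environment flag (assumption):
#   RUN_NIGHTLY=1 python -m pytest -k "OnnxStableDiffusionImg2Img" -v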
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
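# Minimal usage sketch (hedged: `BertOnnxConfig` is the conventional name of the
# OnnxConfig subclass above, which the original snippet leaves anonymous):
#   config = BertConfig()                 # defaults mirror bert-base-uncased
#   onnx_config = BertOnnxConfig(config)
#   onnx_config.inputs                    # OrderedDict with the dynamic axes above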
| 675 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (plus its tokenizer) built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
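# Example invocation via python-fire (illustrative values only; the model name,
# output directory, and config override are assumptions):
#   python save_randomly_initialized_version.py t5-small ./t5-random --d_model=256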
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering using TensorFlow. ``vectors`` is an n*k 2-D NumPy array of
    n vectors of dimensionality k; ``noofclusters`` is an integer."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
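# Minimal usage sketch (assumption: a TF 1.x-era runtime, since the function
# relies on tf.Session, tf.placeholder, and tf.sub):
#   vectors = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.1, 7.9]])
#   centroids, assignments = TFKMeansCluster(vectors, 2)
#   # two tight clusters -> centroids land near (1.1, 0.9) and (8.05, 7.95)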
| 675 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCAmelCase = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
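# Worked example (sketch): a root holding 3 coins with two empty children needs
# one move to each child, so:
#   distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2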
| 675 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of ``nums`` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
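# Worked example (sketch): for [1, 2, 4, 5, 9, 10] the best non-adjacent picks
# are 2 + 5 + 10 = 17, and indeed:
#   maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10])  # -> 17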
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
    def A( self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
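# Outside the test harness, the same checkpoint can be used directly (sketch;
# `audio_array` stands in for any 1-D waveform at the expected sampling rate):
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of rain"])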
| 675 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
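# Sketch of the intent (assumption): the conditional imports let downstream code
# do `from diffusers.models import AutoencoderKL` without crashing when torch
# (or flax, for the Flax classes) is not installed; the names simply are not
# exported in that case.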
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
    def __init__(self, transformer, vae, scheduler, idalabel=None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(","):
                    self.labels[label.lstrip().lower()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label):
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
                    __UpperCAmelCase : Tuple = torch.float32 if is_mps else torch.float64
else:
                    __UpperCAmelCase : Dict = torch.int32 if is_mps else torch.int64
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
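# Usage sketch: this class corresponds to diffusers' DiTPipeline, so under that
# assumption a typical call looks like the following (checkpoint is the public
# facebook/DiT-XL-2-256; `get_label_ids` is the label-lookup method defined above):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     images = pipe(class_labels=class_ids, num_inference_steps=25).images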
| 675 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = analyze_text(lowercase_ )
__UpperCAmelCase : List[str] = list(''' ''' + ascii_lowercase )
    # total number of single-character occurrences (the normalizer for probabilities).
    __UpperCAmelCase : str = sum(single_char_strings.values() )
    # entropy accumulator for one-character strings
    __UpperCAmelCase : List[str] = 0
    # for each letter of the alphabet, look it up in the counter and accumulate its entropy term
    for ch in my_alphas:
if ch in single_char_strings:
__UpperCAmelCase : Tuple = single_char_strings[ch]
__UpperCAmelCase : List[Any] = my_str / all_sum
my_fir_sum += prob * math.loga(lowercase_ ) # entropy formula.
# print entropy
print(f"{round(-1 * my_fir_sum ):.1f}" )
    # two-character strings: repeat the same computation over character pairs
__UpperCAmelCase : Dict = sum(two_char_strings.values() )
__UpperCAmelCase : Optional[Any] = 0
    # accumulate the entropy term for each two-character sequence.
    for cha in my_alphas:
        for chb in my_alphas:
            __UpperCAmelCase : Optional[int] = cha + chb
if sequence in two_char_strings:
__UpperCAmelCase : Optional[int] = two_char_strings[sequence]
__UpperCAmelCase : Union[str, Any] = int(lowercase_ ) / all_sum
my_sec_sum += prob * math.loga(lowercase_ )
# print second entropy
print(f"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple[dict, dict]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = Counter() # type: ignore
__UpperCAmelCase : Optional[int] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowercase_ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
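# Quick sanity check of the counting logic above (analyze_text and calculate_prob
# are the call names used elsewhere in this file; the renamed locals are assumed
# to bind as in the original code):
#
#     >>> single, double = analyze_text("abb")
#     >>> dict(single)   # last char counted once up front, then every char but the last
#     {'b': 2, 'a': 1}
#     >>> dict(double)   # a leading-space pair plus every adjacent pair
#     {' a': 1, 'ab': 1, 'bb': 1}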
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
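if __name__ == "__main__":
    # Standalone sketch of the slow integration path above (public checkpoint;
    # network access is assumed for the model weights and the fixture images).
    processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    encoding = processor(prepare_images()[0], return_tensors="pt")
    print(encoding.input_ids.shape)  # torch.Size([1, 1024]) -- one color-cluster token per pixel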
| 675 | 1 |
import os
import sys
import unittest
lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase = os.path.join(git_repo_path, """src""", """transformers""")
lowerCAmelCase = """
{0} = None
"""
lowerCAmelCase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
lowerCAmelCase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Any = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowercase__)
__UpperCAmelCase : Any = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowercase__ , '''tokenizers''')
__UpperCAmelCase : Dict = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowercase__ , '''tensorflow_text''')
__UpperCAmelCase : Optional[Any] = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowercase__ , '''sentencepiece_and_tokenizers''')
__UpperCAmelCase : Optional[int] = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowercase__ , '''sentencepiece_and_tensorflow_text''')
__UpperCAmelCase : List[Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowercase__ , '''sentencepiece_and_tokenizers_and_vision''')
def A( self):
__UpperCAmelCase : Optional[Any] = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , lowercase__)
self.assertIn('''tensorflow_text''' , lowercase__)
self.assertIn('''sentencepiece_and_tokenizers''' , lowercase__)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def A( self):
__UpperCAmelCase : int = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowercase__ , '''\nCONSTANT = None\n''')
__UpperCAmelCase : List[str] = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowercase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__UpperCAmelCase : str = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__UpperCAmelCase : Dict = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__UpperCAmelCase : Union[str, Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowercase__)
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
    # Each of these divs holds the details of a single job posting
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase = logging.get_logger(__name__)
# General docstring
lowerCAmelCase = """MobileNetV1Config"""
# Base docstring
lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase = [1, 1_024, 7, 7]
# Image classification docstring
lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase = """tabby, tabby cat"""
lowerCAmelCase = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = {}
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : Dict = model.mobilenet_va
else:
__UpperCAmelCase : List[str] = model
__UpperCAmelCase : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
__UpperCAmelCase : Union[str, Any] = backbone.conv_stem.convolution.weight
__UpperCAmelCase : Optional[Any] = backbone.conv_stem.normalization.bias
__UpperCAmelCase : Tuple = backbone.conv_stem.normalization.weight
__UpperCAmelCase : Optional[int] = backbone.conv_stem.normalization.running_mean
__UpperCAmelCase : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__UpperCAmelCase : List[str] = i + 1
__UpperCAmelCase : Optional[int] = i * 2
__UpperCAmelCase : Any = backbone.layer[pt_index]
__UpperCAmelCase : Optional[Any] = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
__UpperCAmelCase : List[Any] = pointer.convolution.weight
__UpperCAmelCase : Any = pointer.normalization.bias
__UpperCAmelCase : List[str] = pointer.normalization.weight
__UpperCAmelCase : Tuple = pointer.normalization.running_mean
__UpperCAmelCase : Optional[int] = pointer.normalization.running_var
__UpperCAmelCase : Union[str, Any] = backbone.layer[pt_index + 1]
__UpperCAmelCase : str = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
__UpperCAmelCase : int = pointer.convolution.weight
__UpperCAmelCase : List[str] = pointer.normalization.bias
__UpperCAmelCase : int = pointer.normalization.weight
__UpperCAmelCase : int = pointer.normalization.running_mean
__UpperCAmelCase : List[Any] = pointer.normalization.running_var
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : Any = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__UpperCAmelCase : Any = model.classifier.weight
__UpperCAmelCase : Optional[Any] = model.classifier.bias
return tf_to_pt_map
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__UpperCAmelCase : Any = tf.train.list_variables(lowercase_ )
__UpperCAmelCase : Union[str, Any] = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
__UpperCAmelCase : Optional[int] = tf.train.load_variable(lowercase_ , lowercase_ )
__UpperCAmelCase : Any = array
# Build TF to PyTorch weights loading map
__UpperCAmelCase : Optional[int] = _build_tf_to_pytorch_map(lowercase_ , lowercase_ , lowercase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
__UpperCAmelCase : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__UpperCAmelCase : List[Any] = np.transpose(lowercase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__UpperCAmelCase : Optional[Any] = array.squeeze().transpose()
else:
__UpperCAmelCase : str = np.transpose(lowercase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
__UpperCAmelCase : List[str] = torch.from_numpy(lowercase_ )
tf_weights.pop(lowercase_ , lowercase_ )
tf_weights.pop(name + '''/RMSProp''' , lowercase_ )
tf_weights.pop(name + '''/RMSProp_1''' , lowercase_ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , lowercase_ )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = features.shape[-2:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = conv_layer.stride
__UpperCAmelCase , __UpperCAmelCase : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__UpperCAmelCase : str = max(kernel_height - stride_height , 0 )
else:
__UpperCAmelCase : List[str] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__UpperCAmelCase : List[Any] = max(kernel_width - stride_width , 0 )
else:
__UpperCAmelCase : Tuple = max(kernel_width - (in_width % stride_width) , 0 )
__UpperCAmelCase : Union[str, Any] = pad_along_width // 2
__UpperCAmelCase : str = pad_along_width - pad_left
__UpperCAmelCase : List[Any] = pad_along_height // 2
__UpperCAmelCase : Union[str, Any] = pad_along_height - pad_top
__UpperCAmelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowercase_ , lowercase_ , '''constant''' , 0.0 )
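# Worked example of the TensorFlow "SAME" padding rule above: for a 3x3 kernel
# with stride 2 on a 224x224 input, 224 % 2 == 0, so pad_along_height =
# max(3 - 2, 0) = 1, giving pad_top = 0 and pad_bottom = 1 (likewise for width).
# The tensor is therefore padded by (0, 1, 0, 1): one extra row/column on the
# bottom/right only, matching TensorFlow's asymmetric "SAME" behaviour.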
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1 , lowercase__ = 1 , lowercase__ = False , lowercase__ = True , lowercase__ = True , ):
super().__init__()
__UpperCAmelCase : int = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
__UpperCAmelCase : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2)
__UpperCAmelCase : List[str] = nn.Convad(
in_channels=lowercase__ , out_channels=lowercase__ , kernel_size=lowercase__ , stride=lowercase__ , padding=lowercase__ , groups=lowercase__ , bias=lowercase__ , padding_mode='''zeros''' , )
if use_normalization:
__UpperCAmelCase : Any = nn.BatchNormad(
num_features=lowercase__ , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=lowercase__ , track_running_stats=lowercase__ , )
else:
__UpperCAmelCase : Optional[Any] = None
if use_activation:
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowercase__):
__UpperCAmelCase : Optional[int] = ACTaFN[config.hidden_act]
else:
__UpperCAmelCase : Optional[int] = config.hidden_act
else:
__UpperCAmelCase : List[Any] = None
def A( self , lowercase__):
if self.config.tf_padding:
__UpperCAmelCase : Optional[int] = apply_tf_padding(lowercase__ , self.convolution)
__UpperCAmelCase : Dict = self.convolution(lowercase__)
if self.normalization is not None:
__UpperCAmelCase : int = self.normalization(lowercase__)
if self.activation is not None:
__UpperCAmelCase : str = self.activation(lowercase__)
return features
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Dict = MobileNetVaConfig
_lowerCAmelCase : int = load_tf_weights_in_mobilenet_va
_lowerCAmelCase : List[str] = '''mobilenet_v1'''
_lowerCAmelCase : Optional[Any] = '''pixel_values'''
_lowerCAmelCase : Dict = False
def A( self , lowercase__):
if isinstance(lowercase__ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase__ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , _UpperCamelCase , )
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = True):
super().__init__(lowercase__)
__UpperCAmelCase : Tuple = config
__UpperCAmelCase : Tuple = 3_2
__UpperCAmelCase : int = max(int(depth * config.depth_multiplier) , config.min_depth)
__UpperCAmelCase : List[str] = MobileNetVaConvLayer(
lowercase__ , in_channels=config.num_channels , out_channels=lowercase__ , kernel_size=3 , stride=2 , )
__UpperCAmelCase : int = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__UpperCAmelCase : Union[str, Any] = nn.ModuleList()
for i in range(1_3):
__UpperCAmelCase : List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__UpperCAmelCase : int = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
lowercase__ , in_channels=lowercase__ , out_channels=lowercase__ , kernel_size=3 , stride=strides[i] , groups=lowercase__ , ))
self.layer.append(
MobileNetVaConvLayer(
lowercase__ , in_channels=lowercase__ , out_channels=lowercase__ , kernel_size=1 , ))
__UpperCAmelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def A( self , lowercase__):
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowercase__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , ):
__UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''')
__UpperCAmelCase : Optional[int] = self.conv_stem(lowercase__)
__UpperCAmelCase : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
__UpperCAmelCase : str = layer_module(lowercase__)
if output_hidden_states:
__UpperCAmelCase : Tuple = all_hidden_states + (hidden_states,)
__UpperCAmelCase : Union[str, Any] = hidden_states
if self.pooler is not None:
__UpperCAmelCase : Optional[int] = torch.flatten(self.pooler(lowercase__) , start_dim=1)
else:
__UpperCAmelCase : List[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__ , pooler_output=lowercase__ , hidden_states=lowercase__ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _UpperCamelCase , )
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
super().__init__(lowercase__)
__UpperCAmelCase : str = config.num_labels
__UpperCAmelCase : Optional[int] = MobileNetVaModel(lowercase__)
__UpperCAmelCase : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__UpperCAmelCase : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase__)
__UpperCAmelCase : Tuple = nn.Linear(lowercase__ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , ):
__UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Any = self.mobilenet_va(lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__)
__UpperCAmelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase : Optional[int] = self.classifier(self.dropout(lowercase__))
__UpperCAmelCase : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCAmelCase : int = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCAmelCase : List[Any] = '''single_label_classification'''
else:
__UpperCAmelCase : List[str] = '''multi_label_classification'''
if self.config.problem_type == "regression":
__UpperCAmelCase : List[str] = MSELoss()
if self.num_labels == 1:
__UpperCAmelCase : List[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
__UpperCAmelCase : str = loss_fct(lowercase__ , lowercase__)
elif self.config.problem_type == "single_label_classification":
__UpperCAmelCase : str = CrossEntropyLoss()
__UpperCAmelCase : List[str] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__UpperCAmelCase : Union[str, Any] = BCEWithLogitsLoss()
__UpperCAmelCase : Tuple = loss_fct(lowercase__ , lowercase__)
if not return_dict:
__UpperCAmelCase : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowercase__ , logits=lowercase__ , hidden_states=outputs.hidden_states , )
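# Minimal inference sketch for the classification head above, using the public
# google/mobilenet_v1_1.0_224 checkpoint ("cat.png" is a hypothetical local file):
#
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     with torch.no_grad():
#         idx = model(**inputs).logits.argmax(-1).item()
#     print(model.config.id2label[idx])  # e.g. "tabby, tabby cat"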
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
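# Worked example: with the MoVQ scale factor of 8 used by this pipeline, a 768x768
# request gives 768 // 8**2 = 12 whole blocks per side with no remainder, so the
# helper returns (12 * 8, 12 * 8) == (96, 96) -- a 96x96 latent grid. A 770x770
# request would round up to 13 blocks and return (104, 104).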
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger("""transformers.models.encodec""")
lowerCAmelCase = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
lowerCAmelCase = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
lowerCAmelCase = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
lowerCAmelCase = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
lowerCAmelCase = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCAmelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCAmelCase = []
lowerCAmelCase = []
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCAmelCase : Dict = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
__UpperCAmelCase : int = getattr(lowercase_ , lowercase_ ).shape
else:
__UpperCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[str] = value
elif weight_type == "weight_v":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias":
__UpperCAmelCase : Dict = value
elif weight_type == "running_mean":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "running_var":
__UpperCAmelCase : Tuple = value
elif weight_type == "num_batches_tracked":
__UpperCAmelCase : Any = value
elif weight_type == "weight_ih_l0":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_hh_l0":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
__UpperCAmelCase : List[Any] = value
elif weight_type == "bias_hh_l0":
__UpperCAmelCase : str = value
elif weight_type == "weight_ih_l1":
__UpperCAmelCase : Dict = value
elif weight_type == "weight_hh_l1":
__UpperCAmelCase : str = value
elif weight_type == "bias_ih_l1":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias_hh_l1":
__UpperCAmelCase : Optional[int] = value
else:
__UpperCAmelCase : Union[str, Any] = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
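# Examples of the wildcard matching above (layer names are hypothetical):
#   should_ignore("encoder.model.3.conv.conv", ["decoder.*"])  -> False
#   should_ignore("encoder.model.3.conv.conv", ["encoder.*"])  -> True  (trailing ".*" = prefix match)
#   should_ignore("quantizer.vq.layers.0._codebook.embed", ["quantizer.*._codebook"])  -> True  (".*." = infix match)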
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : str = []
if model_name == "encodec_24khz" or "encodec_32khz":
__UpperCAmelCase : Union[str, Any] = MAPPING_24K
elif model_name == "encodec_48khz":
__UpperCAmelCase : Dict = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(lowercase_ , lowercase_ ):
logger.info(f"{name} was ignored" )
continue
__UpperCAmelCase : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
__UpperCAmelCase : List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
__UpperCAmelCase : Tuple = True
if "*" in mapped_key:
__UpperCAmelCase : Dict = name.split(lowercase_ )[0].split('''.''' )[-2]
__UpperCAmelCase : Tuple = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
__UpperCAmelCase : Optional[int] = '''weight_g'''
elif "weight_v" in name:
__UpperCAmelCase : List[Any] = '''weight_v'''
elif "weight_ih_l0" in name:
__UpperCAmelCase : Dict = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
__UpperCAmelCase : List[Any] = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
__UpperCAmelCase : str = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
__UpperCAmelCase : Any = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
__UpperCAmelCase : Optional[Any] = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
__UpperCAmelCase : Optional[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
__UpperCAmelCase : str = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
__UpperCAmelCase : Optional[Any] = '''bias_hh_l1'''
elif "bias" in name:
__UpperCAmelCase : Tuple = '''bias'''
elif "weight" in name:
__UpperCAmelCase : str = '''weight'''
elif "running_mean" in name:
__UpperCAmelCase : Any = '''running_mean'''
elif "running_var" in name:
__UpperCAmelCase : int = '''running_var'''
elif "num_batches_tracked" in name:
__UpperCAmelCase : Dict = '''num_batches_tracked'''
else:
__UpperCAmelCase : Optional[Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> Any:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : int = EncodecConfig.from_pretrained(lowercase_ )
else:
__UpperCAmelCase : Dict = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__UpperCAmelCase : Optional[int] = [8, 5, 4, 4]
__UpperCAmelCase : str = [2.2]
__UpperCAmelCase : Optional[int] = 64
__UpperCAmelCase : str = 32000
__UpperCAmelCase : Optional[Any] = 2048
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Tuple = False
elif model_name == "encodec_48khz":
__UpperCAmelCase : Tuple = [8, 5, 4, 2]
__UpperCAmelCase : str = [3.0, 6.0, 1_2.0, 2_4.0]
__UpperCAmelCase : str = 48000
__UpperCAmelCase : List[Any] = 2
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = '''time_group_norm'''
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[int] = 1.0
__UpperCAmelCase : Optional[int] = 0.0_1
else:
raise ValueError(f"Unknown model name: {model_name}" )
__UpperCAmelCase : str = EncodecModel(lowercase_ )
__UpperCAmelCase : Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowercase_ )
__UpperCAmelCase : int = torch.load(lowercase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__UpperCAmelCase : Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(lowercase_ , lowercase_ , lowercase_ )
model.save_pretrained(lowercase_ )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
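# Example invocation (the script filename and local checkpoint path are assumptions;
# see the header comment for where the original weights are downloaded from):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf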
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : int = CpmAntTokenizer
_lowerCAmelCase : Union[str, Any] = False
def A( self):
super().setUp()
__UpperCAmelCase : Any = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
@tooslow
def A( self):
__UpperCAmelCase : Tuple = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''')
__UpperCAmelCase : List[Any] = '''今天天气真好!'''
__UpperCAmelCase : Union[str, Any] = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__UpperCAmelCase : str = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[Any] = '''今天天气真好!'''
__UpperCAmelCase : Optional[int] = [tokenizer.bos_token] + tokens
__UpperCAmelCase : Tuple = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , lowercase__)
__UpperCAmelCase : List[Any] = tokenizer.decode(lowercase__)
self.assertEqual(lowercase__ , lowercase__)
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
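# The product of the convolutional strides is the downsampling factor between
# raw waveform samples and feature frames: for the default strides above,
# 5 * 2**6 = 320 samples per output frame.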
| 675 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : int = GPTSwaTokenizer
_lowerCAmelCase : Any = False
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[Any] = False
def A( self):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Tuple = GPTSwaTokenizer(lowercase__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def A( self , lowercase__):
__UpperCAmelCase : Dict = '''This is a test'''
__UpperCAmelCase : Tuple = '''This is a test'''
return input_text, output_text
def A( self):
__UpperCAmelCase : List[str] = '''<s>'''
__UpperCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__) , lowercase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__) , lowercase__)
def A( self):
__UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(lowercase__) , 2_0_0_0)
def A( self):
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0)
def A( self):
__UpperCAmelCase : Optional[int] = GPTSwaTokenizer(lowercase__)
__UpperCAmelCase : Optional[int] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2])
__UpperCAmelCase : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
lowercase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(lowercase__)
self.assertListEqual(
lowercase__ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
__UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(lowercase__)
# fmt: off
self.assertListEqual(
lowercase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
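# "é" has no dedicated piece in this test vocab, so SentencePiece byte fallback
# emits its raw UTF-8 bytes <0xC3><0xA9>; the digit 9 likewise falls back to the
# single byte <0x39>.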
def A( self):
__UpperCAmelCase : Optional[int] = GPTSwaTokenizer(lowercase__)
__UpperCAmelCase : str = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__UpperCAmelCase : str = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowercase__ , lowercase__):
self.assertListEqual(tokenizer.encode_fast(lowercase__) , lowercase__)
# Test that decode_fast returns the input text
for text, token_ids in zip(lowercase__ , lowercase__):
self.assertEqual(tokenizer.decode_fast(lowercase__) , lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__UpperCAmelCase : int = {'''input_ids''': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=lowercase__ , )
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
idalabel = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
idalabel = {int(k) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
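# COCO val2017 image 000000039769 (the familiar two-cats-on-a-couch picture) is
# the standard smoke-test input used across the transformers conversion scripts.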
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
'''simple docstring'''
arr = [randint(-1000, 1000) for i in range(10)]
r = randint(-5000, 5000)
return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
'''simple docstring'''
for triplet in permutations(arr, 3):
if sum(triplet) == target:
return tuple(sorted(triplet))
return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
'''simple docstring'''
arr.sort()
n = len(arr)
for i in range(n - 1):
left, right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
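# triplet_sum2 sorts once (O(n log n)) and then runs a two-pointer scan per
# anchor element, for O(n^2) overall versus the O(n^3) permutation search in
# triplet_sum1; the timing harness below makes the difference measurable.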
def solution_times() -> tuple[float, float]:
'''simple docstring'''
setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
test_code1 = '''
triplet_sum1(*dataset)
'''
test_code2 = '''
triplet_sum2(*dataset)
'''
times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 675 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
_lowerCAmelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
_lowerCAmelCase : str = "audio"
_lowerCAmelCase : str = "transcription"
def A( self , lowercase__):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , lowercase__):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(self)
__UpperCAmelCase : int = self.input_schema.copy()
__UpperCAmelCase : List[Any] = features[self.audio_column]
__UpperCAmelCase : int = input_schema
return task_template
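# Copying the dataset's own Audio feature into the input schema preserves its
# decoding parameters (e.g. sampling rate) when columns are cast for this task.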
@property
def A( self):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 675 |
def binary_or(a: int, b: int) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''')
a_binary = str(bin(a))[2:]  # remove the leading "0b"
b_binary = str(bin(b))[2:]
max_len = max(len(a_binary), len(b_binary))
return "0b" + "".join(
str(int('''1''' in (char_a, char_b)))
for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
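# For example, binary_or(25, 32) returns "0b111001" (decimal 57), since
# 0b011001 | 0b100000 == 0b111001 once both operands are zero-padded.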
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import unittest
from transformers import DonutProcessor
lowerCAmelCase = """naver-clova-ix/donut-base"""
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : List[str] = DonutProcessor.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase : Dict = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
__UpperCAmelCase : Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
__UpperCAmelCase : Dict = self.processor.tokenajson(lowercase__)
self.assertDictEqual(lowercase__ , lowercase__)
| 675 |
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
'''simple docstring'''
x = len(message)
i = 0
while True:
if x == i:
i = 0
if len(key) == len(message):
break
key += key[i]
i += 1
return key
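# The keyword is cycled until it covers the whole message, e.g. "SECRET" for a
# 17-character message becomes "SECRETSECRETSECRE".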
def cipher_text(message: str, key_new: str) -> str:
'''simple docstring'''
cipher_text = ''''''
i = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
x = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dictb[x]
return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
'''simple docstring'''
or_txt = ''''''
i = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dictb[x]
return or_txt
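# Decryption mirrors the subtraction performed in cipher_text; the explicit +26
# is redundant for non-negative operands but makes the "stay within 0..25
# before indexing the alphabet" intent obvious.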
def main() -> None:
'''simple docstring'''
message = '''THE GERMAN ATTACK'''
key = '''SECRET'''
key_new = generate_key(message, key)
s = cipher_text(message, key_new)
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(s, key_new)}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
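# Marking the batch/sequence dimensions (plus "choice" for multiple choice) as
# dynamic axes lets the exported ONNX graph accept variable batch sizes and
# sequence lengths at inference time.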
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
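# np.histogram buckets pixel counts per class id; the union then follows from
# inclusion-exclusion: area_union = area_pred_label + area_label - area_intersect.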
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
def hamming(n_element: int) -> list:
'''simple docstring'''
n_element = int(n_element)
if n_element < 1:
my_error = ValueError('''n_element should be a positive number''')
raise my_error
hamming_list = [1]
i, j, k = (0, 0, 0)
index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
index += 1
return hamming_list
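# The three cursors i, j and k track the smallest elements whose doubles,
# triples and quintuples have not yet been emitted; appending the minimum of
# the three candidates keeps hamming_list sorted and free of duplicates.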
if __name__ == "__main__":
n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
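# A window s of length m hashes to (s[0]*B**(m-1) + ... + s[m-1]) % p with
# B = alphabet_size and p = modulus, so the text hash can be updated in O(1)
# per shift instead of rehashing p_len characters every time.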
def rabin_karp(pattern: str, text: str) -> bool:
'''simple docstring'''
p_len = len(pattern)
t_len = len(text)
if p_len > t_len:
return False
p_hash = 0
text_hash = 0
modulus_power = 1
# Calculating the hash of pattern and substring of text
for i in range(p_len):
p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
modulus_power = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
text_hash = (
(text_hash - ord(text[i]) * modulus_power) * alphabet_size
+ ord(text[i + p_len])
) % modulus
return False
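# The rolling update in the scan above is:
#   new_hash = ((old_hash - ord(text[i]) * B**(p_len - 1)) * B + ord(text[i + p_len])) % p
# where modulus_power caches B**(p_len - 1) % p from the first loop.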
def test_rabin_karp() -> None:
'''simple docstring'''
# Test 1)
pattern = '''abc1abc12'''
text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
text2 = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
# Test 2)
pattern = '''ABABX'''
text = '''ABABZABABYABABX'''
assert rabin_karp(pattern, text)
# Test 3)
pattern = '''AAAB'''
text = '''ABAAAAAB'''
assert rabin_karp(pattern, text)
# Test 4)
pattern = '''abcdabcy'''
text = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(pattern, text)
# Test 5)
pattern = '''Lü'''
text = '''Lüsai'''
assert rabin_karp(pattern, text)
pattern = '''Lue'''
assert not rabin_karp(pattern, text)
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[Any] = '''lxmert'''
_lowerCAmelCase : Optional[int] = {}
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=9_5_0_0 , lowercase__=1_6_0_0 , lowercase__=4_0_0 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=9 , lowercase__=5 , lowercase__=5 , lowercase__=2_0_4_8 , lowercase__=4 , lowercase__=6.6_7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , **lowercase__ , ):
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = num_qa_labels
__UpperCAmelCase : List[Any] = num_object_labels
__UpperCAmelCase : Optional[Any] = num_attr_labels
__UpperCAmelCase : Union[str, Any] = l_layers
__UpperCAmelCase : Any = x_layers
__UpperCAmelCase : Optional[int] = r_layers
__UpperCAmelCase : int = visual_feat_dim
__UpperCAmelCase : int = visual_pos_dim
__UpperCAmelCase : int = visual_loss_normalizer
__UpperCAmelCase : int = task_matched
__UpperCAmelCase : str = task_mask_lm
__UpperCAmelCase : Tuple = task_obj_predict
__UpperCAmelCase : Tuple = task_qa
__UpperCAmelCase : List[str] = visual_obj_loss
__UpperCAmelCase : Optional[Any] = visual_attr_loss
__UpperCAmelCase : Union[str, Any] = visual_feat_loss
__UpperCAmelCase : int = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**lowercase__)
| 675 |
def hamming(n_element: int) -> list:
'''simple docstring'''
n_element = int(n_element)
if n_element < 1:
my_error = ValueError('''n_element should be a positive number''')
raise my_error
hamming_list = [1]
i, j, k = (0, 0, 0)
index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
index += 1
return hamming_list
if __name__ == "__main__":
n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : int = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : List[Any] = seq_length
__UpperCAmelCase : List[Any] = is_training
__UpperCAmelCase : Any = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Any = num_labels
__UpperCAmelCase : List[Any] = num_choices
__UpperCAmelCase : Tuple = scope
def A( self):
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
__UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = DistilBertModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = DistilBertForMaskedLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = DistilBertForQuestionAnswering(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(
lowercase__ , attention_mask=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : int = DistilBertForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : str = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : Union[str, Any] = DistilBertForTokenClassification(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : Optional[Any] = DistilBertForMultipleChoice(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Tuple = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCAmelCase : List[str] = model(
lowercase__ , attention_mask=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A( self):
__UpperCAmelCase : int = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : Dict = config_and_inputs
__UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowerCAmelCase : Dict = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : str = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = True
def A( self):
__UpperCAmelCase : int = DistilBertModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=lowercase__ , dim=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase__)
def A( self):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__)
def A( self):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__)
def A( self):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__)
def A( self):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__)
@slow
def A( self):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = DistilBertModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
@slow
@require_torch_gpu
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCAmelCase : str = True
__UpperCAmelCase : str = model_class(config=lowercase__)
__UpperCAmelCase : str = self._prepare_for_class(lowercase__ , lowercase__)
__UpperCAmelCase : Any = torch.jit.trace(
lowercase__ , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ , os.path.join(lowercase__ , '''traced_model.pt'''))
__UpperCAmelCase : Optional[int] = torch.jit.load(os.path.join(lowercase__ , '''traced_model.pt''') , map_location=lowercase__)
loaded(inputs_dict['''input_ids'''].to(lowercase__) , inputs_dict['''attention_mask'''].to(lowercase__))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : List[str] = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
__UpperCAmelCase : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__UpperCAmelCase : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__UpperCAmelCase : Dict = model(lowercase__ , attention_mask=lowercase__)[0]
__UpperCAmelCase : int = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : List[str] = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1e-4))
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
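# The three sections above mirror REALM's components: a shared BERT-style
# encoder, a span-extraction reader, and a retriever over num_block_records
# pre-encoded evidence blocks.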
| 675 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
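# Hedged note (illustrative): because this fixture is autouse, every test
# reads and writes its datasets/metrics/modules caches under the
# pytest-managed temporary directory instead of the user's real HF cache.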
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase : List[str] = None
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ):
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , )
__UpperCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , lowercase__) != add_prefix_space:
__UpperCAmelCase : List[Any] = getattr(lowercase__ , pre_tok_state.pop('''type'''))
__UpperCAmelCase : str = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**lowercase__)
__UpperCAmelCase : Optional[int] = add_prefix_space
def A( self , *lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = kwargs.get('''is_split_into_words''' , lowercase__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
''' pretokenized inputs.''')
return super()._batch_encode_plus(*lowercase__ , **lowercase__)
def A( self , *lowercase__ , **lowercase__):
__UpperCAmelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , lowercase__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
''' pretokenized inputs.''')
return super()._encode_plus(*lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__)
return tuple(lowercase__)
def A( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__) + [self.eos_token_id])
if len(lowercase__) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
return input_ids
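# Hedged reading (illustrative): each turn is encoded without special tokens
# and terminated with the EOS id; if the running concatenation grows past
# model_max_length, only the most recent model_max_length token ids are kept.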
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
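# Illustrative behavior (sketch; assumes the row is sorted in decreasing order
# as the validation helper above checks):
#   find_negative_index([4, 2, 0, -1, -3]) -> 3 (index of the first negative)
#   find_negative_index([1, 0]) -> 2 (no negatives, so the row length)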
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
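# Quick sanity sketch: all three counters agree on the test grids above, e.g.
# [[3, 2], [1, 0]] -> 0 and
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]] -> 8.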
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__ = 1_3 , lowercase__ = 6_4 , lowercase__ = 2 , lowercase__ = 3 , lowercase__ = 3 , lowercase__ = True , lowercase__ = True , lowercase__ = 1_2_8 , lowercase__=[1_6, 3_2, 6_4, 1_2_8] , lowercase__ = 7 , lowercase__ = 4 , lowercase__ = 3_7 , lowercase__ = "gelu" , lowercase__ = 0.1 , lowercase__ = 0.1 , lowercase__ = 1_0 , lowercase__ = 0.0_2 , lowercase__ = 2 , lowercase__ = 1 , lowercase__ = 1_2_8 , lowercase__ = [2, 2, 2, 2] , lowercase__ = 2 , lowercase__ = 2 , ):
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : int = patch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = type_sequence_label_size
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[Any] = encoder_stride
__UpperCAmelCase : Optional[Any] = num_attention_outputs
__UpperCAmelCase : Union[str, Any] = embed_dim
__UpperCAmelCase : List[str] = embed_dim + 1
__UpperCAmelCase : int = resolution
__UpperCAmelCase : str = depths
__UpperCAmelCase : Optional[Any] = hidden_sizes
__UpperCAmelCase : Optional[Any] = dim
__UpperCAmelCase : Any = mlp_expansion_ratio
def A( self):
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : int = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A( self):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = TFEfficientFormerModel(config=lowercase__)
__UpperCAmelCase : Dict = model(lowercase__ , training=lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = self.type_sequence_label_size
__UpperCAmelCase : Optional[int] = TFEfficientFormerForImageClassification(lowercase__)
__UpperCAmelCase : Any = model(lowercase__ , labels=lowercase__ , training=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__UpperCAmelCase : str = 1
__UpperCAmelCase : Any = TFEfficientFormerForImageClassification(lowercase__)
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCAmelCase : Any = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Tuple = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[str] = False
def A( self):
__UpperCAmelCase : Tuple = TFEfficientFormerModelTester(self)
__UpperCAmelCase : Tuple = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def A( self):
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(lowercase__)
__UpperCAmelCase : Any = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = model_class(lowercase__)
__UpperCAmelCase : Dict = model(**self._prepare_for_class(lowercase__ , lowercase__) , training=lowercase__)
__UpperCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase__) , lowercase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
__UpperCAmelCase : Optional[int] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
__UpperCAmelCase : List[str] = seq_length * self.model_tester.chunk_length
else:
__UpperCAmelCase : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__UpperCAmelCase : Tuple = outputs.decoder_hidden_states
self.assertIsInstance(lowercase__ , (list, tuple))
self.assertEqual(len(lowercase__) , lowercase__)
__UpperCAmelCase : int = getattr(self.model_tester , '''seq_length''' , lowercase__)
__UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , lowercase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__=False):
__UpperCAmelCase : Tuple = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def A( self):
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__)
@slow
def A( self):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = TFEfficientFormerModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
__UpperCAmelCase : int = getattr(self.model_tester , '''seq_length''' , lowercase__)
__UpperCAmelCase : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , lowercase__)
__UpperCAmelCase : List[Any] = getattr(self.model_tester , '''key_length''' , lowercase__)
__UpperCAmelCase : Any = getattr(self.model_tester , '''chunk_length''' , lowercase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
__UpperCAmelCase : List[Any] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Any = True
__UpperCAmelCase : List[str] = model_class(lowercase__)
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowercase__ , lowercase__) , training=lowercase__)
__UpperCAmelCase : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
__UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(lowercase__ , lowercase__) , training=lowercase__)
__UpperCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def A( self):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__UpperCAmelCase : List[Any] = model_class(lowercase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__UpperCAmelCase : Optional[int] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__UpperCAmelCase : Union[str, Any] = model(lowercase__)
self.assertTrue(outputs_dict is not None)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def A( self):
__UpperCAmelCase : List[str] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=lowercase__ , return_tensors='''tf''')
# forward pass
__UpperCAmelCase : Any = model(**lowercase__ , training=lowercase__)
# verify the logits
__UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : List[str] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=lowercase__ , return_tensors='''tf''')
# forward pass
__UpperCAmelCase : Union[str, Any] = model(**lowercase__ , training=lowercase__)
# verify the logits
__UpperCAmelCase : Union[str, Any] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : str = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
from __future__ import annotations
import math
lowerCAmelCase = """2020.9.26"""
lowerCAmelCase = """xcodz-dot, cclaus, dhruvmanila"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, float]:
'''simple docstring'''
if not all(isinstance(lowercase_ , (float, int) ) for val in locals().values() ):
__UpperCAmelCase : Tuple = f"Input values must either be float or int: {list(locals().values() )}"
raise TypeError(lowercase_ )
__UpperCAmelCase : List[Any] = ((x * distance) / (z + distance)) * scale
__UpperCAmelCase : Tuple = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
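# Worked example (sketch): for (x, y, z) = (1.0, 2.0, 3.0) with distance and
# scale both 10.0, the formulas above give
#   projected_x = (1.0 * 10.0) / (3.0 + 10.0) * 10.0 ≈ 7.6923
#   projected_y = (2.0 * 10.0) / (3.0 + 10.0) * 10.0 ≈ 15.3846
# matching the demo call in the __main__ block below.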
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, float, float]:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('''Axis must be a str''' )
__UpperCAmelCase : Any = locals()
del input_variables["axis"]
if not all(isinstance(lowercase_ , (float, int) ) for val in input_variables.values() ):
__UpperCAmelCase : Union[str, Any] = (
'''Input values except axis must either be float or int: '''
f"{list(input_variables.values() )}"
)
raise TypeError(lowercase_ )
__UpperCAmelCase : Tuple = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
__UpperCAmelCase : Any = x * math.cos(lowercase_ ) - y * math.sin(lowercase_ )
__UpperCAmelCase : Tuple = y * math.cos(lowercase_ ) + x * math.sin(lowercase_ )
__UpperCAmelCase : List[Any] = z
elif axis == "x":
__UpperCAmelCase : Union[str, Any] = y * math.cos(lowercase_ ) - z * math.sin(lowercase_ )
__UpperCAmelCase : str = z * math.cos(lowercase_ ) + y * math.sin(lowercase_ )
__UpperCAmelCase : Any = x
elif axis == "y":
__UpperCAmelCase : str = x * math.cos(lowercase_ ) - z * math.sin(lowercase_ )
__UpperCAmelCase : Any = z * math.cos(lowercase_ ) + x * math.sin(lowercase_ )
__UpperCAmelCase : str = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
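# Worked example (sketch): with a = 25 (0b11001) and b = 32 (0b100000), the
# zero-padded character-wise OR yields '0b111001', i.e. 57 == 25 | 32.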
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
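# Hedged reading (illustrative): in the exported ONNX graph, axis 0 of every
# input is the dynamic batch dimension and axis 1 the dynamic sequence
# dimension (for multiple-choice, axis 1 is the choice and axis 2 the
# sequence dimension).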
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now let's construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
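# Hedged usage sketch (assumes a TF1-style runtime where tf.Session and
# tf.placeholder exist; the inputs below are illustrative):
#   vectors = [array([1.0, 1.0]), array([1.2, 0.8]), array([8.0, 8.0])]
#   centroids, assignments = __SCREAMING_SNAKE_CASE(vectors, 2)
# Two centroid locations come back along with one cluster index per vector.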
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : int = [1]
for i in range(2 , lowercase_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[Any] = list(range(lowercase_ ) )
# Find permutation
while factorials:
__UpperCAmelCase : Optional[Any] = factorials.pop()
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = divmod(lowercase_ , lowercase_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
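# Worked example (sketch): k = 1, n = 5 decomposes as 1 = 0*24 + 0*6 + 0*2 + 1
# in the factorial number system, yielding [0, 1, 2, 4, 3] -- the permutation
# immediately after the identity [0, 1, 2, 3, 4] obtained with k = 0.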
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
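# Example (for illustration): for nums = [2, 7, 9, 3, 1] the best selection of
# non-adjacent elements is 2 + 9 + 1 = 12, which is what the rolling
# (max_including, max_excluding) recurrence above returns.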
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
_lowerCAmelCase : Optional[datasets.Features] = None
_lowerCAmelCase : str = "utf-8"
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = True # deprecated
_lowerCAmelCase : Optional[int] = None # deprecated
_lowerCAmelCase : int = 1_0 << 2_0 # 10MB
_lowerCAmelCase : Optional[bool] = None
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
_lowerCAmelCase : Optional[Any] = JsonConfig
def A( self):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
__UpperCAmelCase : Optional[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def A( self , lowercase__):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__UpperCAmelCase : Optional[int] = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowercase__ , (str, list, tuple)):
__UpperCAmelCase : Any = data_files
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = [files]
__UpperCAmelCase : str = [dl_manager.iter_files(lowercase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
__UpperCAmelCase : int = []
for split_name, files in data_files.items():
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : str = [files]
__UpperCAmelCase : Dict = [dl_manager.iter_files(lowercase__) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase__ , gen_kwargs={'''files''': files}))
return splits
def A( self , lowercase__):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
__UpperCAmelCase : Optional[int] = self.config.features.arrow_schema.field(lowercase__).type
__UpperCAmelCase : List[Any] = pa_table.append_column(lowercase__ , pa.array([None] * len(lowercase__) , type=lowercase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : str = table_cast(lowercase__ , self.config.features.arrow_schema)
return pa_table
def A( self , lowercase__):
for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowercase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
__UpperCAmelCase : int = json.load(lowercase__)
# We keep only the field we are interested in
__UpperCAmelCase : List[Any] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowercase__ , (list, tuple)):
__UpperCAmelCase : Any = set().union(*[row.keys() for row in dataset])
__UpperCAmelCase : str = {col: [row.get(lowercase__) for row in dataset] for col in keys}
else:
__UpperCAmelCase : List[str] = dataset
__UpperCAmelCase : Any = pa.Table.from_pydict(lowercase__)
yield file_idx, self._cast_table(lowercase__)
# If the file has one json object per line
else:
with open(lowercase__ , '''rb''') as f:
__UpperCAmelCase : Any = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__UpperCAmelCase : Dict = max(self.config.chunksize // 3_2 , 1_6 << 1_0)
__UpperCAmelCase : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
__UpperCAmelCase : Optional[Any] = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowercase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__UpperCAmelCase : Optional[Any] = batch.decode(self.config.encoding , errors=lowercase__).encode('''utf-8''')
try:
while True:
try:
__UpperCAmelCase : str = paj.read_json(
io.BytesIO(lowercase__) , read_options=paj.ReadOptions(block_size=lowercase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowercase__ , pa.ArrowInvalid)
and "straddling" not in str(lowercase__)
or block_size > len(lowercase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(lowercase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowercase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
__UpperCAmelCase : int = json.load(lowercase__)
except json.JSONDecodeError:
logger.error(F"Failed to read file '{file}' with error {type(lowercase__)}: {e}")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowercase__ , lowercase__): # list is the only sequence type supported in JSON
try:
__UpperCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset])
__UpperCAmelCase : Union[str, Any] = {col: [row.get(lowercase__) for row in dataset] for col in keys}
__UpperCAmelCase : Union[str, Any] = pa.Table.from_pydict(lowercase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file '{file}' with error {type(lowercase__)}: {e}")
raise ValueError(F"Not able to read records in the JSON file at {file}.") from None
yield file_idx, self._cast_table(lowercase__)
break
else:
logger.error(F"Failed to read file '{file}' with error {type(lowercase__)}: {e}")
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowercase__)
batch_idx += 1
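# Illustrative use of this builder through the public `datasets` API (the file
# names below are placeholders):
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")               # one JSON object per line
#   ds = load_dataset("json", data_files="data.json", field="rows")  # records nested under a "rows" key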
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio recording of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 | 1 |
from timeit import timeit
lowerCAmelCase = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = len(lowercase_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : Any = len(lowercase_ ) // 2
__UpperCAmelCase : Any = len(lowercase_ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
if len(lowercase_ ) <= 2:
return True
if s[0] == s[len(lowercase_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
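# Note: the recursive variant strips two characters per call, so a string of
# length L needs roughly L/2 nested calls and can exceed Python's default
# recursion limit (about 1000 frames) for inputs longer than ~2000 characters.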
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
return s == s[::-1]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
__UpperCAmelCase : int = f"all({name}(key) is value for key, value in test_data.items())"
__UpperCAmelCase : Optional[int] = f"from __main__ import test_data, {name}"
__UpperCAmelCase : Optional[Any] = 500000
__UpperCAmelCase : List[Any] = timeit(stmt=lowercase_ , setup=lowercase_ , number=lowercase_ )
print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
# create a imagenet -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
                    __UpperCAmelCase : Tuple = torch.float32 if is_mps else torch.float64
else:
                    __UpperCAmelCase : Dict = torch.int32 if is_mps else torch.int64
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
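# Usage sketch for a DiT pipeline like this one (the checkpoint name is taken
# from the public DiT release and is an assumption here):
#
#   from diffusers import DiTPipeline
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["golden retriever"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]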
| 675 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = ['''pixel_values''']
def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = True , lowercase__ = 1 / 2_5_5 , lowercase__ = True , lowercase__ = None , lowercase__ = None , lowercase__ = True , **lowercase__ , ):
super().__init__(**lowercase__)
__UpperCAmelCase : Any = size if size is not None else {'''shortest_edge''': 2_2_4}
__UpperCAmelCase : str = get_size_dict(lowercase__ , default_to_square=lowercase__)
__UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__UpperCAmelCase : str = get_size_dict(lowercase__ , default_to_square=lowercase__ , param_name='''crop_size''')
__UpperCAmelCase : List[Any] = do_resize
__UpperCAmelCase : List[Any] = size
__UpperCAmelCase : Optional[int] = resample
__UpperCAmelCase : List[Any] = do_center_crop
__UpperCAmelCase : Union[str, Any] = crop_size
__UpperCAmelCase : Optional[int] = do_rescale
__UpperCAmelCase : Optional[int] = rescale_factor
__UpperCAmelCase : List[Any] = do_normalize
__UpperCAmelCase : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCAmelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCAmelCase : List[str] = do_convert_rgb
def A( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
__UpperCAmelCase : str = get_resize_output_image_size(lowercase__ , size=size['''shortest_edge'''] , default_to_square=lowercase__)
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : str = get_size_dict(lowercase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
return center_crop(lowercase__ , size=(size['''height'''], size['''width''']) , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
__UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Dict = size if size is not None else self.size
__UpperCAmelCase : Tuple = get_size_dict(lowercase__ , param_name='''size''' , default_to_square=lowercase__)
__UpperCAmelCase : str = resample if resample is not None else self.resample
__UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : Dict = get_size_dict(lowercase__ , param_name='''crop_size''' , default_to_square=lowercase__)
__UpperCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
__UpperCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCAmelCase : Union[str, Any] = make_list_of_images(lowercase__)
if not valid_images(lowercase__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCAmelCase : Optional[Any] = [convert_to_rgb(lowercase__) for image in images]
# All transformations expect numpy arrays.
__UpperCAmelCase : Tuple = [to_numpy_array(lowercase__) for image in images]
if do_resize:
__UpperCAmelCase : Dict = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__) for image in images]
if do_center_crop:
__UpperCAmelCase : Union[str, Any] = [self.center_crop(image=lowercase__ , size=lowercase__) for image in images]
if do_rescale:
__UpperCAmelCase : List[str] = [self.rescale(image=lowercase__ , scale=lowercase__) for image in images]
if do_normalize:
__UpperCAmelCase : List[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__) for image in images]
__UpperCAmelCase : Optional[int] = [to_channel_dimension_format(lowercase__ , lowercase__) for image in images]
__UpperCAmelCase : int = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__)
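# Usage sketch for a CLIP-style image processor like this one (the checkpoint
# name is illustrative):
#
#   from transformers import CLIPImageProcessor
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(images=pil_image, return_tensors="pt")  # -> {"pixel_values": tensor}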
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
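# Background note: the ImageGPT processor color-quantizes every pixel to its
# nearest learned cluster (512 clusters in the released checkpoints, 2 in the
# toy dict above), so a 32x32 input becomes a sequence of 32 * 32 = 1024 token
# ids -- which is why the shapes asserted above end in 1024.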
| 675 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ = 4 ) -> list[list[int]]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = abs(lowercase_ ) or 4
return [[1 + x + y * row_size for x in range(lowercase_ )] for y in range(lowercase_ )]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(lowercase_ ) )
# OR.. transpose(reverse_column(matrix))
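# Quick check (for illustration): for [[1, 2], [3, 4]], transpose gives
# [[1, 3], [2, 4]] and reverse_row then yields [[2, 4], [1, 3]] -- the 90
# degree counterclockwise rotation of the original matrix.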
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(lowercase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(lowercase_ ) )
# OR.. transpose(reverse_row(matrix))
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
__UpperCAmelCase : Any = [list(lowercase_ ) for x in zip(*lowercase_ )]
return matrix
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
__UpperCAmelCase : Tuple = matrix[::-1]
return matrix
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[int]]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [x[::-1] for x in matrix]
return matrix
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
for i in matrix:
print(*lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
print_matrix(rotate_aa(matrix))
lowerCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
print_matrix(rotate_aaa(matrix))
lowerCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
print_matrix(rotate_aaa(matrix))
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
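# Caveat: this scraper depends on a particular version of Indeed's HTML; the
# `data-tn-component` / `data-tn-element` attributes are not a stable API, so
# the selectors above may need updating against the current page markup.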
| 675 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : int = GPTSanJapaneseTokenizer
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Tuple = {'''do_clean_text''': False, '''add_prefix_space''': False}
def A( self):
super().setUp()
# fmt: off
__UpperCAmelCase : Union[str, Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
__UpperCAmelCase : str = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
__UpperCAmelCase : Union[str, Any] = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
with open(self.emoji_file , '''w''') as emoji_writer:
emoji_writer.write(json.dumps(lowercase__))
def A( self , **lowercase__):
kwargs.update(self.special_tokens_map)
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase__)
def A( self , lowercase__):
__UpperCAmelCase : Optional[Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
__UpperCAmelCase : Tuple = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def A( self , lowercase__):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.get_input_output_texts(lowercase__)
__UpperCAmelCase : Tuple = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : Dict = tokenizer.decode(lowercase__ , clean_up_tokenization_spaces=lowercase__)
return text, ids
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
__UpperCAmelCase : List[Any] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Any = '''こんにちは、世界。 こんばんは、㔺界。'''
__UpperCAmelCase : List[Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
__UpperCAmelCase : List[Any] = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
# Testing conversion to ids with special tokens
__UpperCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__UpperCAmelCase : Dict = tokenizer.convert_tokens_to_ids(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : str = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : List[str] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
__UpperCAmelCase : Optional[Any] = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
__UpperCAmelCase : Any = tokenizer.encode(lowercase__)
__UpperCAmelCase : List[Any] = tokenizer.decode(lowercase__)
self.assertEqual(lowercase__ , lowercase__)
@slow
def A( self):
__UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
# Testing tokenization
__UpperCAmelCase : Dict = '''こんにちは、世界。'''
__UpperCAmelCase : List[str] = '''こんばんは、㔺界。😀'''
__UpperCAmelCase : Union[str, Any] = '''こんにちは、世界。こんばんは、世界。😀'''
__UpperCAmelCase : Optional[Any] = tokenizer.encode(prefix_text + input_text)
__UpperCAmelCase : int = tokenizer.encode('''''' , prefix_text=prefix_text + input_text)
__UpperCAmelCase : List[Any] = tokenizer.encode(lowercase__ , prefix_text=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(lowercase__)
__UpperCAmelCase : Tuple = tokenizer.decode(lowercase__)
__UpperCAmelCase : Optional[Any] = tokenizer.decode(lowercase__)
self.assertEqual(lowercase__ , lowercase__)
self.assertEqual(lowercase__ , lowercase__)
self.assertEqual(lowercase__ , lowercase__)
@slow
def A( self):
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
# Testing tokenization
__UpperCAmelCase : Tuple = '''こんにちは、世界。'''
__UpperCAmelCase : List[Any] = '''こんばんは、㔺界。😀'''
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(lowercase__)) - 2
__UpperCAmelCase : Tuple = len(tokenizer.encode(lowercase__)) - 2
__UpperCAmelCase : int = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : List[Any] = tokenizer(prefix_text + input_text).token_type_ids
__UpperCAmelCase : int = tokenizer('''''' , prefix_text=prefix_text + input_text).token_type_ids
__UpperCAmelCase : int = tokenizer(lowercase__ , prefix_text=lowercase__).token_type_ids
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
@slow
def A( self):
__UpperCAmelCase : str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
__UpperCAmelCase : Union[str, Any] = tokenizer.encode('''あンいワ''')
__UpperCAmelCase : str = tokenizer.encode('''''' , prefix_text='''あンいワ''')
__UpperCAmelCase : Optional[Any] = tokenizer.encode('''いワ''' , prefix_text='''あン''')
self.assertEqual(tokenizer.decode(lowercase__) , tokenizer.decode(lowercase__))
self.assertEqual(tokenizer.decode(lowercase__) , tokenizer.decode(lowercase__))
self.assertNotEqual(lowercase__ , lowercase__)
self.assertNotEqual(lowercase__ , lowercase__)
self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token
@slow
def A( self):
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''')
__UpperCAmelCase : List[str] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
__UpperCAmelCase : List[Any] = tokenizer(lowercase__ , padding=lowercase__)
__UpperCAmelCase : Tuple = tokenizer.batch_encode_plus(lowercase__ , padding=lowercase__)
# fmt: off
__UpperCAmelCase : str = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__UpperCAmelCase : List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase__)
self.assertListEqual(x_token.token_type_ids , lowercase__)
self.assertListEqual(x_token.attention_mask , lowercase__)
self.assertListEqual(x_token_a.input_ids , lowercase__)
self.assertListEqual(x_token_a.token_type_ids , lowercase__)
self.assertListEqual(x_token_a.attention_mask , lowercase__)
def A( self):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def A( self):
# tokenizer has no padding token
pass
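# Background note: GPTSAN-japanese is a hybrid prefix-LM, so tokens supplied
# via `prefix_text` receive token_type_id 1 (the bidirectionally-attended
# prefix) while continuation tokens receive 0, with a SEG token marking the
# boundary -- the behaviour exercised by the token_type_ids tests above.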
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
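# Worked example (for illustration): with scale_factor=8 and height=width=768,
# 768 // 8**2 == 12 with no remainder, so the function returns (96, 96) -- the
# latent resolution that the MoVQ decoder later upsamples back by a factor of 8.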
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : str = set()
__UpperCAmelCase : Any = []
def parse_line(lowercase_ ):
for line in fp:
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : List[Any] = line.decode('''UTF-8''' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' ''' ):
# process a single warning and move it to `selected_warnings`.
if len(lowercase_ ) > 0:
__UpperCAmelCase : Any = '''\n'''.join(lowercase_ )
# Only keep the warnings specified in `targets`
if any(f": {x}: " in warning for x in targets ):
selected_warnings.add(lowercase_ )
buffer.clear()
continue
else:
__UpperCAmelCase : str = line.strip()
buffer.append(lowercase_ )
if from_gh:
for filename in os.listdir(lowercase_ ):
__UpperCAmelCase : Optional[Any] = os.path.join(lowercase_ , lowercase_ )
if not os.path.isdir(lowercase_ ):
# read the file
if filename != "warnings.txt":
continue
with open(lowercase_ ) as fp:
parse_line(lowercase_ )
else:
try:
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowercase_ ) as fp:
parse_line(lowercase_ )
except Exception:
logger.warning(
f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = set()
__UpperCAmelCase : Optional[Any] = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) )
return selected_warnings
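# Example (illustrative): with targets = ["DeprecationWarning"], an indented
# warnings-summary line such as
#   src/transformers/modeling_utils.py:123: DeprecationWarning: foo is deprecated
# is buffered by parse_line; when the warning body ends, the joined buffer
# contains ": DeprecationWarning: " and is added to the selected warnings.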
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
return values.split(''',''' )
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowerCAmelCase = extract_warnings(args.output_dir, args.targets)
lowerCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
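# Background note: `create_student_by_copying_alternating_layers` initializes
# a smaller student by copying a subset of the teacher's layers (e.g. e=1
# keeps a single encoder layer), which is why these tests only assert the
# resulting layer counts in the student config.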
| 675 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
# create a imagenet -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps , float):
                    __UpperCAmelCase : Tuple = torch.float32 if is_mps else torch.float64
                else:
                    __UpperCAmelCase : Dict = torch.int32 if is_mps else torch.int64
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
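# --- Added sketch (not part of the pipeline): the classifier-free guidance
# update applied above, in isolation. With guidance_scale == 1 it reduces to
# the conditional prediction; larger scales extrapolate away from the
# unconditional one.
def _cfg(cond_eps, uncond_eps, guidance_scale):
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_cond, _uncond = torch.tensor([1.0]), torch.tensor([0.5])
assert torch.allclose(_cfg(_cond, _uncond, 1.0), _cond)
assert torch.allclose(_cfg(_cond, _uncond, 4.0), torch.tensor([2.5]))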
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
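# --- Added check: with the default conv_stride above,
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property evaluates to
# 5 * 2**6 = 320 input samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320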
| 675 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Dict = '''instructblip_vision_model'''
def __init__( self , lowercase__=1_4_0_8 , lowercase__=6_1_4_4 , lowercase__=3_9 , lowercase__=1_6 , lowercase__=2_2_4 , lowercase__=1_4 , lowercase__="gelu" , lowercase__=1e-6 , lowercase__=0.0 , lowercase__=1e-10 , lowercase__=True , **lowercase__ , ):
super().__init__(**lowercase__)
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = patch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : str = qkv_bias
@classmethod
def A( cls , lowercase__ , **lowercase__):
cls._set_token_in_kwargs(lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase__ , **lowercase__)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
__UpperCAmelCase : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowercase__ , **lowercase__)
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[Any] = '''instructblip_qformer'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=2 , lowercase__=1_4_0_8 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : List[str] = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : List[Any] = cross_attention_frequency
__UpperCAmelCase : List[str] = encoder_hidden_size
@classmethod
def A( cls , lowercase__ , **lowercase__):
cls._set_token_in_kwargs(lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase__ , **lowercase__)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
__UpperCAmelCase : List[str] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowercase__ , **lowercase__)
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : str = '''instructblip'''
_lowerCAmelCase : int = True
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=3_2 , **lowercase__):
super().__init__(**lowercase__)
if vision_config is None:
__UpperCAmelCase : int = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
__UpperCAmelCase : Dict = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
__UpperCAmelCase : Dict = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
__UpperCAmelCase : Dict = InstructBlipVisionConfig(**lowercase__)
__UpperCAmelCase : Optional[Any] = InstructBlipQFormerConfig(**lowercase__)
__UpperCAmelCase : int = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__UpperCAmelCase : Dict = CONFIG_MAPPING[text_model_type](**lowercase__)
__UpperCAmelCase : int = self.text_config.tie_word_embeddings
__UpperCAmelCase : List[str] = self.text_config.is_encoder_decoder
__UpperCAmelCase : Optional[int] = num_query_tokens
__UpperCAmelCase : List[Any] = self.vision_config.hidden_size
__UpperCAmelCase : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCAmelCase : Optional[Any] = 1.0
__UpperCAmelCase : List[Any] = 0.0_2
@classmethod
def A( cls , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase__ , )
def A( self):
__UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__)
__UpperCAmelCase : int = self.vision_config.to_dict()
__UpperCAmelCase : Dict = self.qformer_config.to_dict()
__UpperCAmelCase : Optional[int] = self.text_config.to_dict()
__UpperCAmelCase : Tuple = self.__class__.model_type
return output
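# --- Added sketch (self-contained): the composite-config pattern above stores
# each sub-config as a nested dict under its own key; `from_pretrained` on a
# sub-config class recovers its part from the composite dict the same way.
_composite = {
    "model_type": "instructblip",
    "vision_config": {"hidden_size": 1_4_0_8},
    "qformer_config": {"hidden_size": 7_6_8},
}
if _composite.get("model_type") == "instructblip":
    _vision_dict = _composite["vision_config"]
assert _vision_dict == {"hidden_size": 1_4_0_8}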
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
    __UpperCAmelCase : Tuple = MobileNetV1Config(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase : int = {int(k) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
    __UpperCAmelCase : int = MobileNetV1ForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
    __UpperCAmelCase : List[str] = MobileNetV1ImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
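# Example invocation (hypothetical local paths):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf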
| 675 | 1 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
for i in range(1 , int(sqrt(lowercase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(lowercase_ ):
total += i + n // i
elif i == sqrt(lowercase_ ):
total += i
return total - n
def __SCREAMING_SNAKE_CASE ( lowercase_ = 10000 ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = sum(
i
for i in range(1 , lowercase_ )
if sum_of_divisors(sum_of_divisors(lowercase_ ) ) == i and sum_of_divisors(lowercase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
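# --- Added check (self-contained): 220 and 284 form the classic amicable pair,
# so both are counted by the solution above.
assert sum(i for i in range(1, 220) if 220 % i == 0) == 284
assert sum(i for i in range(1, 284) if 284 % i == 0) == 220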
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
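# --- Added check (self-contained): the copy above deep-copies every field, so
# mutating a dict-valued field on the clone must not leak into the original.
_original = {"headers": {"x": 1}}
_clone = {k: copy.deepcopy(v) for k, v in _original.items()}
_clone["headers"]["x"] = 2
assert _original["headers"]["x"] == 1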
| 675 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowerCamelCase ( nn.Module ):
def __init__( self):
super().__init__()
__UpperCAmelCase : int = nn.Linear(3 , 4)
        __UpperCAmelCase : Tuple = nn.BatchNorm1d(4)
__UpperCAmelCase : str = nn.Linear(4 , 5)
def A( self , lowercase__):
return self.lineara(self.batchnorm(self.lineara(lowercase__)))
class lowerCamelCase ( _UpperCamelCase ):
def A( self , lowercase__ , *lowercase__ , **lowercase__):
return (args[0] + 1,) + args[1:], kwargs
class lowerCamelCase ( _UpperCamelCase ):
def A( self , lowercase__ , lowercase__):
return output + 1
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Union[str, Any] = ModelForTest()
__UpperCAmelCase : str = ModelHook()
add_hook_to_module(lowercase__ , lowercase__)
self.assertEqual(test_model._hf_hook , lowercase__)
self.assertTrue(hasattr(lowercase__ , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(lowercase__)
self.assertFalse(hasattr(lowercase__ , '''_hf_hook'''))
self.assertFalse(hasattr(lowercase__ , '''_old_forward'''))
def A( self):
__UpperCAmelCase : Optional[int] = ModelForTest()
__UpperCAmelCase : str = ModelHook()
add_hook_to_module(lowercase__ , lowercase__)
add_hook_to_module(lowercase__ , lowercase__ , append=lowercase__)
self.assertEqual(isinstance(test_model._hf_hook , lowercase__) , lowercase__)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(lowercase__ , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(lowercase__)
self.assertFalse(hasattr(lowercase__ , '''_hf_hook'''))
self.assertFalse(hasattr(lowercase__ , '''_old_forward'''))
def A( self):
__UpperCAmelCase : Tuple = ModelForTest()
__UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3)
__UpperCAmelCase : str = test_model(x + 1)
__UpperCAmelCase : List[Any] = test_model(x + 2)
__UpperCAmelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : List[Any] = test_model(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : List[Any] = test_model(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[Any] = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = test_model(lowercase__)
assert torch.allclose(lowercase__ , lowercase__ , atol=1e-5)
def A( self):
__UpperCAmelCase : Optional[int] = ModelForTest()
__UpperCAmelCase : Dict = torch.randn(2 , 3)
__UpperCAmelCase : List[str] = test_model(lowercase__)
__UpperCAmelCase : Optional[Any] = PostForwardHook()
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : str = test_model(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , output + 1 , atol=1e-5))
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : Optional[int] = PostForwardHook()
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : Any = test_model(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , output + 1 , atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Any = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : List[Any] = test_model(lowercase__)
assert torch.allclose(lowercase__ , output + 2 , atol=1e-5)
def A( self):
__UpperCAmelCase : Optional[int] = ModelForTest()
__UpperCAmelCase : Any = torch.randn(2 , 3)
__UpperCAmelCase : Optional[Any] = test_model(lowercase__)
__UpperCAmelCase : Tuple = PostForwardHook()
add_hook_to_module(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[Any] = test_model(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , output + 1))
self.assertTrue(outputa.requires_grad)
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Dict = test_model(lowercase__)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def A( self):
__UpperCAmelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
__UpperCAmelCase : Any = torch.randn(2 , 3)
__UpperCAmelCase : Union[str, Any] = model(lowercase__)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowercase__ , AlignDevicesHook(io_same_device=lowercase__))
__UpperCAmelCase : int = torch.randn(2 , 3).to(0)
__UpperCAmelCase : str = model(lowercase__)
self.assertEqual(output.device , torch.device(0))
def A( self):
__UpperCAmelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
__UpperCAmelCase : List[str] = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase__))
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Dict = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , lowercase__)
__UpperCAmelCase : str = torch.randn(2 , 3)
__UpperCAmelCase : int = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
__UpperCAmelCase : str = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase__))
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
__UpperCAmelCase : Dict = torch.randn(2 , 3)
__UpperCAmelCase : Optional[int] = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def A( self):
__UpperCAmelCase : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
__UpperCAmelCase : str = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(lowercase__ , execution_device=lowercase__ , offload=lowercase__)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Union[str, Any] = torch.device(lowercase__)
self.assertEqual(model.batchnorm.running_mean.device , lowercase__)
__UpperCAmelCase : Optional[Any] = torch.randn(2 , 3)
__UpperCAmelCase : Any = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase__)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(lowercase__ , execution_device=lowercase__ , offload=lowercase__ , offload_buffers=lowercase__)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
__UpperCAmelCase : List[str] = torch.randn(2 , 3)
__UpperCAmelCase : int = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase__)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def A( self):
__UpperCAmelCase : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
__UpperCAmelCase : Tuple = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
lowercase__ , execution_device=lowercase__ , offload=lowercase__ , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Optional[int] = torch.device(lowercase__)
self.assertEqual(model.batchnorm.running_mean.device , lowercase__)
__UpperCAmelCase : Optional[int] = torch.randn(2 , 3)
__UpperCAmelCase : str = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase__)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
lowercase__ , execution_device=lowercase__ , offload=lowercase__ , weights_map=model.state_dict() , offload_buffers=lowercase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
__UpperCAmelCase : Dict = torch.randn(2 , 3)
__UpperCAmelCase : Dict = model(lowercase__)
self.assertEqual(output.device , lowercase__)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase__)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
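# --- Added sketch (self-contained, no accelerate internals): the mechanism
# under test reduced to its essence -- swap a module's `forward` for a wrapper
# so a hook can rewrite inputs and/or outputs.
def _attach_simple_hook(module, post):
    _old_forward = module.forward
    def _new_forward(*args, **kwargs):
        return post(_old_forward(*args, **kwargs))
    module.forward = _new_forward
_lin = nn.Linear(2, 2)
_x = torch.randn(1, 2)
_base = _lin(_x)
_attach_simple_hook(_lin, post=lambda out: out + 1)
assert torch.allclose(_lin(_x), _base + 1)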
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
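# --- Added worked example (self-contained): 25 = 0b11001 and 32 = 0b100000,
# so the zero-padded bitwise OR built above is '0b111001' (= 57), matching
# Python's built-in operator.
assert bin(25 | 32) == "0b111001"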
| 675 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
def __init__( self , lowercase__):
__UpperCAmelCase : List[str] = str(id_)
__UpperCAmelCase : str = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : str = []
__UpperCAmelCase : Union[str, Any] = {} # {vertex:distance}
def __lt__( self , lowercase__):
return self.key < other.key
def __repr__( self):
return self.id
def A( self , lowercase__):
self.neighbors.append(lowercase__)
def A( self , lowercase__ , lowercase__):
__UpperCAmelCase : int = weight
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase_ )
graph[b - 1].add_edge(graph[a - 1] , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : int = []
for u in graph:
__UpperCAmelCase : List[str] = math.inf
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Tuple = graph[:]
while q:
__UpperCAmelCase : Any = min(lowercase_ )
q.remove(lowercase_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__UpperCAmelCase : List[Any] = u
__UpperCAmelCase : List[str] = u.edges[v.id]
for i in range(1 , len(lowercase_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Iterator[tuple]:
'''simple docstring'''
for u in graph:
__UpperCAmelCase : Any = math.inf
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Any = list(lowercase_ )
hq.heapify(lowercase_ )
while h:
__UpperCAmelCase : Union[str, Any] = hq.heappop(lowercase_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__UpperCAmelCase : Any = u
__UpperCAmelCase : Dict = u.edges[v.id]
hq.heapify(lowercase_ )
for i in range(1 , len(lowercase_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
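# --- Added sketch (self-contained): the same minimum-spanning-tree idea with
# lazy deletion on a plain adjacency dict, checked on a triangle graph whose
# MST keeps the edges of weight 5 and 3.
def _mst_weight(adj, start):
    seen, heap, total = set(), [(0, start)], 0
    while heap:
        weight, u = hq.heappop(heap)
        if u in seen:
            continue
        seen.add(u)
        total += weight
        for v, w in adj[u]:
            if v not in seen:
                hq.heappush(heap, (w, v))
    return total
assert _mst_weight({1: [(2, 5), (3, 10)], 2: [(1, 5), (3, 3)], 3: [(1, 10), (2, 3)]}, 1) == 8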
| 675 |
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}  # letter -> index
dictb = dict(enumerate(ascii_uppercase))  # index -> letter
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
            cipher_text += dictb[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
            or_txt += dictb[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
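# --- Added check (self-contained): the subtract-on-encrypt / add-on-decrypt
# scheme above is a proper inverse pair over A-Z.
_atoi = {c: i for i, c in enumerate(ascii_uppercase)}
_itoa = dict(enumerate(ascii_uppercase))
for _plain in "ATTACK":
    _enc = (_atoi[_plain] - _atoi["S"]) % 26
    assert _itoa[(_enc + _atoi["S"] + 26) % 26] == _plain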
| 675 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list[list[int]]:
'''simple docstring'''
__UpperCAmelCase : list[list[int]] = []
__UpperCAmelCase : list[int] = []
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : int = sum(lowercase_ )
create_state_space_tree(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return result
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> None:
'''simple docstring'''
if sum(lowercase_ ) > max_sum or (remaining_nums_sum + sum(lowercase_ )) < max_sum:
return
if sum(lowercase_ ) == max_sum:
result.append(lowercase_ )
return
for index in range(lowercase_ , len(lowercase_ ) ):
create_state_space_tree(
lowercase_ , lowercase_ , index + 1 , [*path, nums[index]] , lowercase_ , remaining_nums_sum - nums[index] , )
lowerCAmelCase = [3, 34, 4, 12, 5, 2]
lowerCAmelCase = 9
lowerCAmelCase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
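# --- Added check (self-contained): for nums = [3, 34, 4, 12, 5, 2] and
# max_sum = 9 the backtracking above must find exactly the subsets {3, 4, 2}
# and {4, 5}; brute force over all combinations confirms those are the only ones.
from itertools import combinations
_expected = sorted(
    sorted(c) for r in range(1, 7) for c in combinations([3, 34, 4, 12, 5, 2], r) if sum(c) == 9
)
assert _expected == [[2, 3, 4], [4, 5]]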
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
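# --- Added sketch (self-contained): the per-class IoU arithmetic from
# `intersect_and_union` above, on a hypothetical 2x2, two-class example.
_pred = np.array([[0, 1], [1, 1]])
_gt = np.array([[0, 1], [0, 1]])
_intersect = np.histogram(_pred[_pred == _gt], bins=2, range=(0, 1))[0]
_area_pred = np.histogram(_pred, bins=2, range=(0, 1))[0]
_area_gt = np.histogram(_gt, bins=2, range=(0, 1))[0]
_iou = _intersect / (_area_pred + _area_gt - _intersect)
assert _iou.tolist() == [0.5, 2 / 3]  # class 0: 1/2, class 1: 2/3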
| 675 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase = 16
lowerCAmelCase = 32
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 16 ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__UpperCAmelCase : Tuple = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowercase_ ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase : Any = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase : List[Any] = 8
else:
__UpperCAmelCase : str = None
return tokenizer.pad(
lowercase_ , padding='''longest''' , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
__UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
__UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase = mocked_dataloaders # noqa: F811
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowercase_ ) == "1":
__UpperCAmelCase : List[str] = 2
# New Code #
__UpperCAmelCase : Tuple = int(args.gradient_accumulation_steps )
__UpperCAmelCase : int = int(args.local_sgd_steps )
# Initialize accelerator
__UpperCAmelCase : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase : Tuple = config['''lr''']
__UpperCAmelCase : Dict = int(config['''num_epochs'''] )
__UpperCAmelCase : Union[str, Any] = int(config['''seed'''] )
__UpperCAmelCase : Dict = int(config['''batch_size'''] )
__UpperCAmelCase : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(lowercase_ )
__UpperCAmelCase , __UpperCAmelCase : Dict = get_dataloaders(lowercase_ , lowercase_ )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
__UpperCAmelCase : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowercase_ )
# Instantiate scheduler
__UpperCAmelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
with LocalSGD(
accelerator=lowercase_ , model=lowercase_ , local_sgd_steps=lowercase_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase_ ):
__UpperCAmelCase : Optional[Any] = model(**lowercase_ )
__UpperCAmelCase : Union[str, Any] = output.loss
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
__UpperCAmelCase : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , lowercase_ )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowercase_ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=lowercase_ , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
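# Example launch (hypothetical script name and flag values):
#   accelerate launch local_sgd_training.py --gradient_accumulation_steps 2 \
#       --local_sgd_steps 8 --mixed_precision fp16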
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
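# --- Added check (self-contained): the O(1) rolling-hash update used above --
# the hash of "bc" derived from the hash of "ab" matches a direct recomputation.
_h_ab = (ord("a") * 256 + ord("b")) % 1_000_003
_rolled = ((_h_ab - ord("a") * 256) * 256 + ord("c")) % 1_000_003
_direct = (ord("b") * 256 + ord("c")) % 1_000_003
assert _rolled == _direct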
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
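# Note (illustrative): _LazyModule defers the real submodule imports until an
# attribute is first accessed, so importing this package stays cheap and the
# tokenizer modules listed in the import structure load only on first use.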
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''Return the first `n_element` terms of the Hamming (5-smooth) number series.'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
__UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
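# Illustrative check (not part of the original file): the first ten terms
# produced by merging the 2-, 3- and 5-multiple streams above are
#   hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]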
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_2 , lowercase__=3 , lowercase__=4 , lowercase__=[1_0, 2_0, 3_0, 4_0] , lowercase__=[2, 2, 3, 2] , lowercase__=True , lowercase__=True , lowercase__=3_7 , lowercase__="gelu" , lowercase__=1_0 , lowercase__=0.0_2 , lowercase__=["stage2", "stage3", "stage4"] , lowercase__=3 , lowercase__=None , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : Optional[Any] = num_stages
__UpperCAmelCase : Union[str, Any] = hidden_sizes
__UpperCAmelCase : Optional[int] = depths
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : Union[str, Any] = out_features
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : Union[str, Any] = scope
__UpperCAmelCase : Optional[Any] = num_stages
def A( self):
__UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def A( self):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def A( self):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowercase__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=lowercase__ , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = UperNetForSemanticSegmentation(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : str = model(lowercase__)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Any = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_lowerCAmelCase : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Tuple = False
def A( self):
__UpperCAmelCase : Dict = UperNetModelTester(self)
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A( self):
return
def A( self):
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(lowercase__)
__UpperCAmelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def A( self):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def A( self):
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def A( self):
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def A( self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def A( self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A( self):
pass
def A( self):
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(lowercase__ , lowercase__))
__UpperCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase__) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = _config_zero_init(lowercase__)
__UpperCAmelCase : Any = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class(config=lowercase__)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def A( self):
pass
@slow
def A( self):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''Fetch the ADE20K fixture image from the Hub and return it as an RGB image.'''
__UpperCAmelCase : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
__UpperCAmelCase : Tuple = Image.open(lowercase_ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
__UpperCAmelCase : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(lowercase__)
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Optional[Any] = processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**lowercase__)
__UpperCAmelCase : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : List[str] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4))
def A( self):
__UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
__UpperCAmelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(lowercase__)
__UpperCAmelCase : Tuple = prepare_img()
__UpperCAmelCase : List[str] = processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
with torch.no_grad():
__UpperCAmelCase : str = model(**lowercase__)
__UpperCAmelCase : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4))
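# Note (illustrative): _config_zero_init shrinks every initializer range in the
# config towards zero, so freshly initialised weights should have mean 0.0 (or
# 1.0 for scale-style parameters); any other value in the initialization test
# above points at a module missed by the model's weight initialisation.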
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
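# Note (illustrative): one config class serves every REALM checkpoint listed
# above; the reader fields (span_hidden_size, max_span_width, reader_beam_size)
# matter only for reader models, while the retrieval fields (num_block_records,
# searcher_beam_size) matter only for the retriever/open-QA variants.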
| 675 | 1 |
lowerCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[str]:
'''Breadth-first search: return a shortest path from `start` to `goal` as a list of nodes.'''
__UpperCAmelCase : Optional[int] = set()
# keep track of all the paths to be checked
__UpperCAmelCase : Tuple = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__UpperCAmelCase : List[str] = queue.pop(0 )
# get the last node from the path
__UpperCAmelCase : Any = path[-1]
if node not in explored:
__UpperCAmelCase : str = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__UpperCAmelCase : int = list(lowercase_ )
new_path.append(lowercase_ )
queue.append(lowercase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowercase_ )
# in case there's no path between the 2 nodes
return []
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''Return the edge count of a shortest path from `start` to `target`, or -1 if unreachable.'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__UpperCAmelCase : Dict = [start]
__UpperCAmelCase : Optional[int] = set(lowercase_ )
# Keep track of distances from the `start` node.
__UpperCAmelCase : int = {start: 0, target: -1}
while queue:
__UpperCAmelCase : int = queue.pop(0 )
if node == target:
__UpperCAmelCase : Dict = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowercase_ )
queue.append(lowercase_ )
__UpperCAmelCase : str = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
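# Further illustrative queries against the same demo graph (sketch):
# bfs_shortest_path(demo_graph, "A", "F") -> ['A', 'C', 'F']
# bfs_shortest_path_distance(demo_graph, "A", "F") -> 2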
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''Mark every collected test lacking an `integration` or `unit` marker as a unit test.'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''Register the custom `torchaudio_latest` pytest marker.'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''Redirect the HF datasets, metrics and modules caches to a temporary directory.'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''Disable `datasets` progress bars for the whole test session.'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''Turn off HF download-count reporting while tests run.'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''Silence SQLAlchemy's uber deprecation warning during tests.'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
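# Usage note (illustrative): because the fixtures above are declared autouse,
# every collected test automatically runs against the temporary HF cache
# directories instead of the user's real ~/.cache/huggingface paths.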
| 675 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
_lowerCAmelCase : Optional[List[bool]]
_lowerCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''Build a 1000x1000 grid sorted in decreasing order along both rows and columns.'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''Assert that every row and every column of the grid is sorted decreasingly.'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''Binary-search a decreasingly sorted row for the index of its first negative value.'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases: an empty row, or a row whose first value is already negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Boundary found: array[mid] is negative and the element before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
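# Illustrative trace (sketch): for the row [4, 3, 2, -1] the binary search
# above returns 3, the index of the first negative entry, so that row
# contributes len(row) - 3 = 1 negative to the grand total.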
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''Count the grid's negatives using one binary search per row.'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''Count the grid's negatives by checking every element.'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''Count the grid's negatives by scanning each row and breaking at the first negative.'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''Benchmark the three counting strategies against the large grid.'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCamelCase :
_lowerCAmelCase : CommonSchedulerState
# setable values
_lowerCAmelCase : jnp.ndarray
_lowerCAmelCase : jnp.ndarray
_lowerCAmelCase : Optional[int] = None
@classmethod
def A( cls , lowercase__ , lowercase__ , lowercase__):
return cls(common=lowercase__ , init_noise_sigma=lowercase__ , timesteps=lowercase__)
@dataclass
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : DDPMSchedulerState
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
_lowerCAmelCase : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCAmelCase : jnp.dtype
@property
def A( self):
return True
@register_to_config
def __init__( self , lowercase__ = 1_0_0_0 , lowercase__ = 0.0_0_0_1 , lowercase__ = 0.0_2 , lowercase__ = "linear" , lowercase__ = None , lowercase__ = "fixed_small" , lowercase__ = True , lowercase__ = "epsilon" , lowercase__ = jnp.floataa , ):
__UpperCAmelCase : Tuple = dtype
def A( self , lowercase__ = None):
if common is None:
__UpperCAmelCase : Any = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
__UpperCAmelCase : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype)
__UpperCAmelCase : str = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=lowercase__ , init_noise_sigma=lowercase__ , timesteps=lowercase__ , )
def A( self , lowercase__ , lowercase__ , lowercase__ = None):
return sample
def A( self , lowercase__ , lowercase__ , lowercase__ = ()):
__UpperCAmelCase : Tuple = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : Tuple = (jnp.arange(0 , lowercase__) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowercase__ , timesteps=lowercase__ , )
def A( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None):
__UpperCAmelCase : List[str] = state.common.alphas_cumprod[t]
__UpperCAmelCase : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__UpperCAmelCase : List[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__UpperCAmelCase : Optional[int] = jnp.clip(lowercase__ , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__UpperCAmelCase : List[Any] = jnp.log(jnp.clip(lowercase__ , a_min=1e-20))
elif variance_type == "fixed_large":
__UpperCAmelCase : Dict = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__UpperCAmelCase : str = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__UpperCAmelCase : Optional[Any] = variance
__UpperCAmelCase : Optional[Any] = state.common.betas[t]
__UpperCAmelCase : Union[str, Any] = (predicted_variance + 1) / 2
__UpperCAmelCase : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = True , ):
__UpperCAmelCase : int = timestep
if key is None:
__UpperCAmelCase : Any = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = jnp.split(lowercase__ , sample.shape[1] , axis=1)
else:
__UpperCAmelCase : Dict = None
# 1. compute alphas, betas
__UpperCAmelCase : Dict = state.common.alphas_cumprod[t]
__UpperCAmelCase : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
__UpperCAmelCase : Tuple = 1 - alpha_prod_t
__UpperCAmelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCAmelCase : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
''' for the FlaxDDPMScheduler.''')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCAmelCase : Any = jnp.clip(lowercase__ , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Any = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__UpperCAmelCase : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__UpperCAmelCase : Optional[Any] = jax.random.split(lowercase__ , num=1)
__UpperCAmelCase : str = jax.random.normal(lowercase__ , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(lowercase__ , lowercase__ , predicted_variance=lowercase__) ** 0.5) * noise
__UpperCAmelCase : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
__UpperCAmelCase : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowercase__ , state=lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
return add_noise_common(state.common , lowercase__ , lowercase__ , lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
return get_velocity_common(state.common , lowercase__ , lowercase__ , lowercase__)
def __len__( self):
return self.config.num_train_timesteps
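# Reference for the coefficients computed in `step` above (illustrative):
# writing alpha_bar_t for the cumulative product of the alphas, the DDPM
# posterior mean from Ho et al. 2020, Eq. (7), is
#   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                  + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
# matching pred_original_sample_coeff and current_sample_coeff respectively.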
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
lowerCAmelCase = tuple[float, float, float]
lowerCAmelCase = tuple[float, float, float]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Vectorad:
'''Return the vector pointing from the first end point to the second.'''
__UpperCAmelCase : Union[str, Any] = end_pointa[0] - end_pointa[0]
__UpperCAmelCase : Union[str, Any] = end_pointa[1] - end_pointa[1]
__UpperCAmelCase : List[str] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Vectorad:
'''Return the cross product of two 3-D vectors.'''
__UpperCAmelCase : Union[str, Any] = ab[1] * ac[2] - ab[2] * ac[1] # *i
__UpperCAmelCase : str = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__UpperCAmelCase : str = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''Return True if every component of `vector` rounds to zero at the given accuracy.'''
return tuple(round(lowercase_ , lowercase_ ) for x in vector ) == (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = 10 ) -> bool:
'''Return True if the three given points are collinear (cross product of AB and AC is zero).'''
__UpperCAmelCase : Tuple = create_vector(lowercase_ , lowercase_ )
__UpperCAmelCase : str = create_vector(lowercase_ , lowercase_ )
return is_zero_vector(get_ad_vectors_cross(lowercase_ , lowercase_ ) , lowercase_ )
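# Illustrative check (sketch of the intended behaviour): three points on one
# line, e.g. (0, 0, 0), (1, 1, 1), (2, 2, 2), give AB x AC = (0, 0, 0), so the
# collinearity test above returns True, while replacing the last point with
# (1, 2, 3) yields the non-zero cross product (1, -2, 1) and therefore False.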
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
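# Note (illustrative): the batching test above depends on LEFT padding. For a
# decoder-only model the prompt must sit flush against the generated tokens,
# so shorter prompts are padded on the left and the attention mask keeps the
# pad positions out of the generation context.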
| 675 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''Quicksort a[start..end] in place with random pivots; return the comparison count.'''
__UpperCAmelCase : Tuple = 0
if start < end:
__UpperCAmelCase : List[str] = randint(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = a[end]
__UpperCAmelCase : int = a[pivot]
__UpperCAmelCase : List[Any] = temp
__UpperCAmelCase , __UpperCAmelCase : Dict = _in_place_partition(lowercase_ , lowercase_ , lowercase_ )
count += _in_place_quick_sort(lowercase_ , lowercase_ , p - 1 )
count += _in_place_quick_sort(lowercase_ , p + 1 , lowercase_ )
return count
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''Partition a[start..end] around a random pivot; return (final pivot index, comparisons).'''
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : int = randint(lowercase_ , lowercase_ )
__UpperCAmelCase : Dict = a[end]
__UpperCAmelCase : Any = a[pivot]
__UpperCAmelCase : Optional[int] = temp
__UpperCAmelCase : List[str] = start - 1
for index in range(lowercase_ , lowercase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__UpperCAmelCase : Optional[int] = new_pivot_index + 1
__UpperCAmelCase : Union[str, Any] = a[new_pivot_index]
__UpperCAmelCase : Optional[int] = a[index]
__UpperCAmelCase : Optional[int] = temp
__UpperCAmelCase : Tuple = a[new_pivot_index + 1]
__UpperCAmelCase : Optional[Any] = a[end]
__UpperCAmelCase : Optional[int] = temp
return new_pivot_index + 1, count
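# Minimal usage sketch (illustrative):
#   data = [3, 1, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   # data is now [1, 2, 3]; `comparisons` varies with the random pivots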
lowerCAmelCase = TemporaryFile()
lowerCAmelCase = 100 # 100 elements are to be sorted
lowerCAmelCase ,lowerCAmelCase = 0, 1 # mean and standard deviation
lowerCAmelCase = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase = np.load(outfile)
lowerCAmelCase = len(M) - 1
lowerCAmelCase = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
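# Minimal usage sketch (illustrative): the BERT config class above can be
# instantiated with overrides, e.g. hidden_size=256, num_hidden_layers=4, to
# describe a smaller model; PretrainedConfig.to_json_string() then serialises
# every field assigned in __init__ for reproducibility.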
| 675 | 1 |
from __future__ import annotations
from math import pi
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]:
'''Given exactly one zero among inductance (H), frequency (Hz) and reactance (ohm), compute it from X_L = 2*pi*f*L.'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
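# Worked example (illustrative): a 10 mH inductor at 50 Hz has reactance
#   X_L = 2 * pi * f * L = 2 * pi * 50 * 0.010 = pi, about 3.1416 ohm,
# so calling the function above with (0.010, 50, 0) returns {"reactance": pi}.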
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
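# Hedged usage sketch. Note this is TF1-era API (tf.Session, tf.placeholder,
# tf.initialize_all_variables; tf.sub, which later versions renamed to
# tf.subtract), so on TensorFlow 2 it would need tf.compat.v1 with eager mode
# disabled. Illustrative call, assuming the function is bound to a readable
# name such as `tf_k_means_cluster`:
#
#   data = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]])
#   centroids, assignments = tf_k_means_cluster(data, 2)
#   # -> two centroids, roughly (1.25, 1.5) and (8.25, 8.5)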
| 675 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCamelCase ( unittest.TestCase ):
def A( self):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=lowercase__ , )
assert hasattr(self , '''env''')
def A( self , lowercase__):
__UpperCAmelCase : List[Any] = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
__UpperCAmelCase : List[str] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowercase__ , instance_count=lowercase__ , instance_type=self.instance_type , debugger_hook_config=lowercase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowercase__ , py_version='''py36''' , )
def A( self , lowercase__):
TrainingJobAnalytics(lowercase__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def A( self , lowercase__):
# create estimator
__UpperCAmelCase : int = self.create_estimator(lowercase__)
# run training
estimator.fit()
# result dataframe
__UpperCAmelCase : int = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__UpperCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
__UpperCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # Get the train time from the SageMaker job; this includes starting, preprocessing and stopping
__UpperCAmelCase : Any = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowercase__)
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
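# Readable restatement of the recurrence above (maximum sum over non-adjacent
# elements, the classic "house robber" pattern): the new "including" total is
# the previous "excluding" total plus the current number.
def _max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)
assert _max_non_adjacent_sum([1, 2, 4, 3]) == 5  # take 1 + 4 (or 2 + 3)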
| 675 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if len(lowercase_ ) < k or k < 0:
raise ValueError('''Invalid Input''' )
__UpperCAmelCase : List[str] = sum(array[:k] )
for i in range(len(lowercase_ ) - k ):
__UpperCAmelCase : int = current_sum - array[i] + array[i + k]
__UpperCAmelCase : Optional[int] = max(lowercase_ , lowercase_ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase = [randint(-1_000, 1_000) for i in range(100)]
lowerCAmelCase = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
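# Readable restatement of the sliding window above, under the
# `max_sum_in_array` name that the demo block calls: each step drops array[i]
# and adds array[i + k], so every window sum after the first costs O(1).
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = max_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24  # window [3, 1, 0, 20]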
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
        __UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
            nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
        __UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
] , )
        __UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
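# Hedged usage sketch of the pipeline under test (model id as in the slow test
# above; `audio` is a 1-D numpy waveform):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> [{"score": ..., "label": ...}, ...] sorted by score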
| 675 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCamelCase ( pl.LightningModule ):
def __init__( self , lowercase__):
super().__init__()
__UpperCAmelCase : List[str] = model
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : int = nn.Linear(self.model.config.hidden_size , self.num_labels)
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = LongformerModel.from_pretrained(lowercase_ )
__UpperCAmelCase : List[Any] = LightningModel(lowercase_ )
__UpperCAmelCase : Any = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
__UpperCAmelCase : Optional[Any] = LongformerForQuestionAnswering.from_pretrained(lowercase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowercase_ )
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
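# Example invocation sketch (the script name and the checkpoint paths are
# hypothetical placeholders):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa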
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
# create a imagenet -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
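# Hedged usage sketch: this matches the class-conditional DiT pipeline pattern
# in diffusers (model id and labels are illustrative; the label-lookup method
# above corresponds to `get_label_ids`):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images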
| 675 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase = """pytorch_model.bin"""
lowerCAmelCase = """pytorch_model.bin.index.json"""
lowerCAmelCase = """adapter_config.json"""
lowerCAmelCase = """adapter_model.bin"""
lowerCAmelCase = """adapter_model.safetensors"""
lowerCAmelCase = """tf_model.h5"""
lowerCAmelCase = """tf_model.h5.index.json"""
lowerCAmelCase = """model.ckpt"""
lowerCAmelCase = """flax_model.msgpack"""
lowerCAmelCase = """flax_model.msgpack.index.json"""
lowerCAmelCase = """model.safetensors"""
lowerCAmelCase = """model.safetensors.index.json"""
lowerCAmelCase = """config.json"""
lowerCAmelCase = """preprocessor_config.json"""
lowerCAmelCase = FEATURE_EXTRACTOR_NAME
lowerCAmelCase = """generation_config.json"""
lowerCAmelCase = """modelcard.json"""
lowerCAmelCase = """▁"""
lowerCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
'''simple docstring'''
if version.parse(lowercase_ ) < version.parse(lowercase_ ):
if "dev" in min_version:
__UpperCAmelCase : Union[str, Any] = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
__UpperCAmelCase : List[Any] = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' )
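# Usage sketch: example scripts call this guard once at import time (the
# function above is `check_min_version` in transformers; the name is
# obfuscated here):
#
#   from transformers.utils import check_min_version
#   check_min_version("4.21.0.dev0")  # raises ImportError on older installs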
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 | 1 |
from torch import nn
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__ , lowercase__):
super().__init__()
__UpperCAmelCase : str = class_size
__UpperCAmelCase : Union[str, Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__UpperCAmelCase : List[str] = nn.Linear(lowercase__ , lowercase__)
def A( self , lowercase__):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
__UpperCAmelCase : Union[str, Any] = self.mlp(lowercase__)
return logits
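# Readable restatement of the single-linear-layer head above (the commented-out
# two-layer MLP stays out here too); shapes are illustrative.
import torch
from torch import nn
class _ClassificationHead(nn.Module):
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.mlp(hidden_state)
assert _ClassificationHead(class_size=2, embed_size=768)(torch.randn(4, 768)).shape == (4, 2)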
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
    # Each of these divs is one organic job posting on the results page
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=0 ) -> Optional[int]:
'''simple docstring'''
return sorted(lowercase_ , key=lambda lowercase_ : x[column] )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=float('''inf''' ) ) -> Any:
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowercase_ ):
__UpperCAmelCase : List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Any = current_dis
return min_dis
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=float('''inf''' ) ) -> Optional[int]:
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , lowercase_ ):
for j in range(max(0 , i - 6 ) , lowercase_ ):
__UpperCAmelCase : str = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Dict = current_dis
return min_dis
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(lowercase_ , lowercase_ )
# recursion
__UpperCAmelCase : int = points_counts // 2
__UpperCAmelCase : Tuple = closest_pair_of_points_sqr(
lowercase_ , points_sorted_on_y[:mid] , lowercase_ )
__UpperCAmelCase : Optional[int] = closest_pair_of_points_sqr(
lowercase_ , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCAmelCase : int = min(lowercase_ , lowercase_ )
__UpperCAmelCase : str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowercase_ )
__UpperCAmelCase : Dict = dis_between_closest_in_strip(
lowercase_ , len(lowercase_ ) , lowercase_ )
return min(lowercase_ , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : str = column_based_sort(lowercase_ , column=0 )
__UpperCAmelCase : Any = column_based_sort(lowercase_ , column=1 )
return (
closest_pair_of_points_sqr(
lowercase_ , lowercase_ , lowercase_ )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
lowerCAmelCase = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
lowerCAmelCase = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
lowerCAmelCase = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
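# Hedged sketch of the lazy-import behavior set up above: importing the
# package resolves names through _LazyModule, so the heavy torch/tf modules
# load only when an attribute is first touched. Paths mirror the string
# exports above:
#
#   from transformers.models.data2vec import Data2VecTextConfig  # cheap, config only
#   from transformers.models.data2vec import Data2VecTextModel   # triggers the torch branch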
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = '''vision-encoder-decoder'''
_lowerCAmelCase : Tuple = True
def __init__( self , **lowercase__):
super().__init__(**lowercase__)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
__UpperCAmelCase : int = kwargs.pop('''encoder''')
__UpperCAmelCase : Optional[int] = encoder_config.pop('''model_type''')
__UpperCAmelCase : Tuple = kwargs.pop('''decoder''')
__UpperCAmelCase : Any = decoder_config.pop('''model_type''')
__UpperCAmelCase : Union[str, Any] = AutoConfig.for_model(lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__)
__UpperCAmelCase : str = True
@classmethod
def A( cls , lowercase__ , lowercase__ , **lowercase__):
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__)
__UpperCAmelCase : Optional[int] = self.encoder.to_dict()
__UpperCAmelCase : Optional[Any] = self.decoder.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = version.parse('''1.11''' )
@property
def A( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def A( self):
return 1e-4
@property
def A( self):
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}})
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
__UpperCAmelCase : Dict = OrderedDict()
__UpperCAmelCase : str = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
__UpperCAmelCase : str = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
__UpperCAmelCase : str = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def A( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
import torch
__UpperCAmelCase : Optional[Any] = OrderedDict()
__UpperCAmelCase : int = super().generate_dummy_inputs(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_input['''input_ids'''].shape
__UpperCAmelCase : List[Any] = (batch, encoder_sequence, self._config.encoder_hidden_size)
__UpperCAmelCase : Optional[Any] = dummy_input.pop('''input_ids''')
__UpperCAmelCase : Optional[int] = dummy_input.pop('''attention_mask''')
__UpperCAmelCase : Tuple = torch.zeros(lowercase__)
return common_inputs
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
pass
def A( self , lowercase__):
return VisionEncoderDecoderEncoderOnnxConfig(lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ = "default"):
__UpperCAmelCase : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowercase__ , lowercase__)
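# Hedged usage sketch for the composite config above (this follows the
# VisionEncoderDecoderConfig pattern in transformers; the encoder and decoder
# choices are illustrative):
#
#   from transformers import ViTConfig, GPT2Config, VisionEncoderDecoderConfig
#
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#   config.decoder.is_decoder           # True
#   config.decoder.add_cross_attention  # True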
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
                ''' It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
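    # The property below returns the feature extractor's overall downsampling
    # factor: the product of all conv strides (5 * 2**6 = 320 for the default
    # strides above).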
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 675 | 1 |
from __future__ import annotations
lowerCAmelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[float]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = len(lowercase_ )
for i in range(lowercase_ ):
__UpperCAmelCase : float = -1
for j in range(i + 1 , lowercase_ ):
if arr[i] < arr[j]:
__UpperCAmelCase : List[Any] = arr[j]
break
result.append(lowercase_ )
return result
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[float]:
'''simple docstring'''
__UpperCAmelCase : Any = []
for i, outer in enumerate(lowercase_ ):
__UpperCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
__UpperCAmelCase : Any = inner
break
result.append(lowercase_ )
return result
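# The variant below is O(n) thanks to a monotonic stack: scanning from the right,
# the stack holds only elements greater than everything seen after them, so each
# element is pushed and popped at most once.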
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[float]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : list[float] = []
__UpperCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(lowercase_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__UpperCAmelCase : int = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
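# Example invocation (the script name and paths here are illustrative, not from
# the original file; the flags match the argparse definitions below):
#   python convert_mobilenet_v1_to_pytorch.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./dump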
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
def decorator(lowercase_ ):
__UpperCAmelCase : Dict = getattr(lowercase_ , '''handle_key''' , [] )
handle += [key]
setattr(lowercase_ , '''handle_key''' , lowercase_ )
return func
return decorator
def __SCREAMING_SNAKE_CASE ( *lowercase_ ) -> int:
'''simple docstring'''
def decorator(lowercase_ ):
__UpperCAmelCase : List[Any] = getattr(lowercase_ , '''handle_key''' , [] )
handle += keys
setattr(lowercase_ , '''handle_key''' , lowercase_ )
return func
return decorator
class lowerCamelCase ( _UpperCamelCase ):
def __new__( cls , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = super().__new__(cls , lowercase__ , lowercase__ , lowercase__)
if not hasattr(lowercase__ , '''key_handler'''):
setattr(lowercase__ , '''key_handler''' , {})
setattr(lowercase__ , '''handle_input''' , KeyHandler.handle_input)
for value in attrs.values():
__UpperCAmelCase : str = getattr(lowercase__ , '''handle_key''' , [])
for key in handled_keys:
__UpperCAmelCase : List[str] = value
return new_cls
@staticmethod
def A( cls):
__UpperCAmelCase : Optional[int] = get_character()
if char != KEYMAP["undefined"]:
__UpperCAmelCase : Any = ord(lowercase__)
__UpperCAmelCase : int = cls.key_handler.get(lowercase__)
if handler:
__UpperCAmelCase : List[str] = char
return handler(cls)
else:
return None
def __SCREAMING_SNAKE_CASE ( cls ) -> int:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
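# In short: the two decorators above tag methods with the keys they handle, the
# KeyHandler metaclass collects those tags into a `key_handler` dict, and the
# helper just above rebuilds an existing class under that metaclass so that
# `handle_input(cls)` can dispatch a captured key press to the tagged method.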
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
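    # The clone below rebuilds the config from a deep copy of every field, so
    # mutating a mutable field on the copy never leaks back into the original.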
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 1 |
from typing import Any
class lowerCamelCase :
def __init__( self , lowercase__):
__UpperCAmelCase : Optional[Any] = data
__UpperCAmelCase : Tuple = None
def __repr__( self):
return F"Node({self.data})"
class lowerCamelCase :
def __init__( self):
__UpperCAmelCase : List[Any] = None
def __iter__( self):
__UpperCAmelCase : Tuple = self.head
while node:
yield node.data
__UpperCAmelCase : int = node.next
def __len__( self):
return sum(1 for _ in self)
def __repr__( self):
return "->".join([str(lowercase__) for item in self])
def __getitem__( self , lowercase__):
if not 0 <= index < len(self):
raise ValueError('''list index out of range.''')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__( self , lowercase__ , lowercase__):
if not 0 <= index < len(self):
raise ValueError('''list index out of range.''')
__UpperCAmelCase : Dict = self.head
for _ in range(lowercase__):
__UpperCAmelCase : Optional[int] = current.next
__UpperCAmelCase : str = data
def A( self , lowercase__):
self.insert_nth(len(self) , lowercase__)
def A( self , lowercase__):
self.insert_nth(0 , lowercase__)
def A( self , lowercase__ , lowercase__):
if not 0 <= index <= len(self):
raise IndexError('''list index out of range''')
__UpperCAmelCase : List[str] = Node(lowercase__)
if self.head is None:
__UpperCAmelCase : Union[str, Any] = new_node
elif index == 0:
__UpperCAmelCase : Dict = self.head # link new_node to head
__UpperCAmelCase : Dict = new_node
else:
__UpperCAmelCase : Optional[int] = self.head
for _ in range(index - 1):
__UpperCAmelCase : List[str] = temp.next
__UpperCAmelCase : Union[str, Any] = temp.next
__UpperCAmelCase : List[Any] = new_node
def A( self): # print every node data
print(self)
def A( self):
return self.delete_nth(0)
def A( self): # delete from tail
return self.delete_nth(len(self) - 1)
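    # NOTE: this is a singly linked list without a tail pointer, so positional
    # access and tail deletion are O(n) -- every operation walks from the head.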
def A( self , lowercase__ = 0):
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('''List index out of range.''')
__UpperCAmelCase : Union[str, Any] = self.head # default first node
if index == 0:
__UpperCAmelCase : str = self.head.next
else:
__UpperCAmelCase : List[Any] = self.head
for _ in range(index - 1):
__UpperCAmelCase : List[Any] = temp.next
__UpperCAmelCase : List[str] = temp.next
__UpperCAmelCase : Dict = temp.next.next
return delete_node.data
def A( self):
return self.head is None
def A( self):
__UpperCAmelCase : Dict = None
__UpperCAmelCase : int = self.head
while current:
# Store the current node's next node.
__UpperCAmelCase : List[str] = current.next
# Make the current node's next point backwards
__UpperCAmelCase : Any = prev
# Make the previous node be the current node
__UpperCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
__UpperCAmelCase : int = next_node
# Return prev in order to put the head at the end
__UpperCAmelCase : Union[str, Any] = prev
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(lowercase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowercase_ ) == i
linked_list.insert_nth(lowercase_ , i + 1 )
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowercase_ ) == 9
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCAmelCase : Optional[int] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(-8 , 1 ) )
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [
-9,
100,
Node(77345112 ),
'''dlrow olleH''',
7,
5555,
0,
-1_9_2.5_5_5_5_5,
'''Hello, world!''',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__UpperCAmelCase : Tuple = LinkedList()
for i in test_input:
linked_list.insert_tail(lowercase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowercase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCAmelCase : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCAmelCase : Any = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCAmelCase : int = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(lowercase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowercase_ )
assert (
str(lowercase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowercase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
from doctest import testmod
testmod()
__UpperCAmelCase : Dict = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(lowercase_ )
print('''\nReading/changing Node data using indexing:''' )
print(f"Element at Position 1: {linked_list[1]}" )
__UpperCAmelCase : List[str] = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(lowercase_ )
print(f"length of linked_list is : {len(lowercase_ )}" )
if __name__ == "__main__":
main()
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
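# Worked example: for inputs 25 (0b11001) and 32 (0b100000), the shorter binary
# string is zero-padded and the character-wise OR yields "0b111001" (= 57).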
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase ( _UpperCamelCase ):
@add_start_docstrings(lowercase__)
def __call__( self , lowercase__ , lowercase__ , **lowercase__):
raise NotImplementedError('''StoppingCriteria needs to be subclassed''')
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Union[str, Any] = max_length
__UpperCAmelCase : Tuple = max_position_embeddings
@add_start_docstrings(lowercase__)
def __call__( self , lowercase__ , lowercase__ , **lowercase__):
__UpperCAmelCase : Any = input_ids.shape[-1]
__UpperCAmelCase : str = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''')
return is_done
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__):
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
'''with `max_length = start_length + max_new_tokens` instead.''' , lowercase__ , )
__UpperCAmelCase : str = start_length
__UpperCAmelCase : Optional[Any] = max_new_tokens
__UpperCAmelCase : Tuple = start_length + max_new_tokens
@add_start_docstrings(lowercase__)
def __call__( self , lowercase__ , lowercase__ , **lowercase__):
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Optional[int] = max_time
__UpperCAmelCase : Any = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowercase__)
def __call__( self , lowercase__ , lowercase__ , **lowercase__):
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase ( _UpperCamelCase ):
@add_start_docstrings(lowercase__)
def __call__( self , lowercase__ , lowercase__ , **lowercase__):
return any(criteria(lowercase__ , lowercase__) for criteria in self)
@property
def A( self):
for stopping_criterium in self:
if isinstance(lowercase__ , lowercase__):
return stopping_criterium.max_length
elif isinstance(lowercase__ , lowercase__):
return stopping_criterium.max_length
return None
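# The helper below reconciles a caller-supplied max_length with the criteria
# list: it warns when the list already carries a different max length, appends
# a MaxLengthCriteria when none is set, and returns a deep copy either way.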
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> StoppingCriteriaList:
'''simple docstring'''
__UpperCAmelCase : List[str] = stopping_criteria.max_length
__UpperCAmelCase : Dict = deepcopy(lowercase_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , lowercase_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=lowercase_ ) )
return new_stopping_criteria
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
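# Worked example matching main() below: for the message "THE GERMAN ATTACK" and
# key "SECRET", the key is repeated cyclically to the message length, giving
# "SECRETSECRETSECRE"; spaces are passed through the cipher unchanged.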
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=True , lowercase_="pt" ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = {'''add_prefix_space''': True} if isinstance(lowercase_ , lowercase_ ) and not line.startswith(''' ''' ) else {}
__UpperCAmelCase : List[Any] = padding_side
return tokenizer(
[line] , max_length=lowercase_ , padding='''max_length''' if pad_to_max_length else None , truncation=lowercase_ , return_tensors=lowercase_ , add_special_tokens=lowercase_ , **lowercase_ , )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = input_ids.ne(lowercase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
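# trim_batch above drops the padding columns that are pad-only across the whole
# batch, shrinking the tensors before they are fed to the model.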
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__="train" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="" , ):
super().__init__()
__UpperCAmelCase : int = Path(lowercase__).joinpath(type_path + '''.source''')
__UpperCAmelCase : Dict = Path(lowercase__).joinpath(type_path + '''.target''')
__UpperCAmelCase : int = self.get_char_lens(self.src_file)
__UpperCAmelCase : Optional[Any] = max_source_length
__UpperCAmelCase : str = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
__UpperCAmelCase : int = tokenizer
__UpperCAmelCase : str = prefix
if n_obs is not None:
__UpperCAmelCase : Any = self.src_lens[:n_obs]
__UpperCAmelCase : List[Any] = src_lang
__UpperCAmelCase : Dict = tgt_lang
def __len__( self):
return len(self.src_lens)
def __getitem__( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = index + 1 # linecache starts at 1
__UpperCAmelCase : List[str] = self.prefix + linecache.getline(str(self.src_file) , lowercase__).rstrip('''\n''')
__UpperCAmelCase : Optional[int] = linecache.getline(str(self.tgt_file) , lowercase__).rstrip('''\n''')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase__):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCAmelCase : Dict = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase__) else self.tokenizer
)
__UpperCAmelCase : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , lowercase__) else self.tokenizer
__UpperCAmelCase : int = encode_line(lowercase__ , lowercase__ , self.max_source_length , '''right''')
__UpperCAmelCase : Optional[int] = encode_line(lowercase__ , lowercase__ , self.max_target_length , '''right''')
__UpperCAmelCase : Tuple = source_inputs['''input_ids'''].squeeze()
__UpperCAmelCase : Optional[Any] = target_inputs['''input_ids'''].squeeze()
__UpperCAmelCase : List[Any] = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def A( lowercase__):
return [len(lowercase__) for x in Path(lowercase__).open().readlines()]
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = torch.stack([x['''input_ids'''] for x in batch])
__UpperCAmelCase : str = torch.stack([x['''attention_mask'''] for x in batch])
__UpperCAmelCase : Dict = torch.stack([x['''decoder_input_ids'''] for x in batch])
__UpperCAmelCase : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase__)
else self.tokenizer.pad_token_id
)
__UpperCAmelCase : Dict = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase__)
else self.tokenizer.pad_token_id
)
__UpperCAmelCase : str = trim_batch(lowercase__ , lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = trim_batch(lowercase__ , lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : Dict = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowerCAmelCase = getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
return list(itertools.chain.from_iterable(lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
__UpperCAmelCase : Any = get_git_info()
save_json(lowercase_ , os.path.join(lowercase_ , '''git_log.json''' ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=4 , **lowercase_ ) -> Dict:
'''simple docstring'''
with open(lowercase_ , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ , indent=lowercase_ , **lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
with open(lowercase_ ) as f:
return json.load(lowercase_ )
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = git.Repo(search_parent_directories=lowercase_ )
__UpperCAmelCase : Optional[Any] = {
'''repo_id''': str(lowercase_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List:
'''simple docstring'''
return list(map(lowercase_ , lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
with open(lowercase_ , '''wb''' ) as f:
return pickle.dump(lowercase_ , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
def remove_articles(lowercase_ ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , lowercase_ )
def white_space_fix(lowercase_ ):
return " ".join(text.split() )
def remove_punc(lowercase_ ):
__UpperCAmelCase : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
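# The normalization above and the two scores below follow the SQuAD evaluation
# convention: lowercase, drop punctuation and articles, collapse whitespace,
# then compare answers by exact match (EM) or by token-overlap F1.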
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = normalize_answer(lowercase_ ).split()
__UpperCAmelCase : int = normalize_answer(lowercase_ ).split()
__UpperCAmelCase : Optional[Any] = Counter(lowercase_ ) & Counter(lowercase_ )
__UpperCAmelCase : int = sum(common.values() )
if num_same == 0:
return 0
__UpperCAmelCase : Optional[int] = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : List[Any] = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : Optional[Any] = (2 * precision * recall) / (precision + recall)
return fa
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
return normalize_answer(lowercase_ ) == normalize_answer(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
assert len(lowercase_ ) == len(lowercase_ )
__UpperCAmelCase : Any = 0
for hypo, pred in zip(lowercase_ , lowercase_ ):
em += exact_match_score(lowercase_ , lowercase_ )
if len(lowercase_ ) > 0:
em /= len(lowercase_ )
return {"em": em}
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
return model_prefix.startswith('''rag''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCAmelCase : Optional[int] = '''dropout_rate'''
for p in extra_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
if not hasattr(lowercase_ , lowercase_ ) and not hasattr(lowercase_ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowercase_ ) )
delattr(lowercase_ , lowercase_ )
continue
__UpperCAmelCase : List[Any] = p if hasattr(lowercase_ , lowercase_ ) else equivalent_param[p]
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
delattr(lowercase_ , lowercase_ )
return hparams, config
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
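# Per-image areas from intersect_and_union are summed over the whole dataset
# below, so the final IoU/accuracy values are computed from dataset-level totals
# rather than averaged image by image.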
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=0 , ):
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : List[str] = seq_length
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : int = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : str = type_vocab_size
__UpperCAmelCase : List[str] = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : int = num_choices
__UpperCAmelCase : Optional[Any] = scope
__UpperCAmelCase : List[str] = projection_dim
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Tuple = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : int = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
__UpperCAmelCase : int = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
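        # DPRConfig wraps the BertConfig built above, adding only the projection head size.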
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = TFDPRContextEncoder(config=lowercase__)
__UpperCAmelCase : Any = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : Any = model(lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : Union[str, Any] = model(lowercase__)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = TFDPRQuestionEncoder(config=lowercase__)
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : str = model(lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : Optional[Any] = model(lowercase__)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = TFDPRReader(config=lowercase__)
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs
__UpperCAmelCase : List[str] = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Tuple = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Tuple = False
def A( self):
__UpperCAmelCase : int = TFDPRModelTester(self)
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase__)
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase__)
@slow
def A( self):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = TFDPRContextEncoder.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFDPRContextEncoder.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[Any] = TFDPRQuestionEncoder.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = TFDPRReader.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
@require_tf
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Any = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
__UpperCAmelCase : List[Any] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]]) # [CLS] hello, is my dog cute? [SEP]
__UpperCAmelCase : Dict = model(lowercase__)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__UpperCAmelCase : Dict = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
])
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4))
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
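    # Both are polynomial hashes base `alphabet_size` modulo `modulus`; after the
    # loop, modulus_power equals alphabet_size ** (p_len - 1) % modulus -- the
    # factor needed to drop the leading character when the window slides right.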
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
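    # Test 1)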
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
__UpperCAmelCase : Optional[int] = gray_code_sequence_string(lowercase_ )
    # convert them to integers
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = int(sequence[i] , 2 )
return sequence
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCAmelCase : Union[str, Any] = 1 << bit_count # defines the length of the sequence
    # 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCAmelCase : Any = gray_code_sequence_string(bit_count - 1 )
__UpperCAmelCase : Optional[Any] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCAmelCase : Any = '''0''' + smaller_sequence[i]
sequence.append(lowercase_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCAmelCase : List[Any] = '''1''' + smaller_sequence[i]
sequence.append(lowercase_ )
return sequence
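# Example outputs: the 2-bit sequence is ["00", "01", "11", "10"] -> [0, 1, 3, 2],
# and the 3-bit sequence is [0, 1, 3, 2, 6, 7, 5, 4]; consecutive values always
# differ in exactly one bit.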
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        __UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
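# Example: the first ten Hamming numbers -- numbers whose only prime factors are
# 2, 3 and 5 -- are [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].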
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_0 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=None , lowercase__=2 , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[Any] = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = scope
__UpperCAmelCase : List[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCAmelCase : List[str] = (image_size // patch_size) ** 2
__UpperCAmelCase : Any = num_patches + 2
def A( self):
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def A( self):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : str = DeiTModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = DeiTForMaskedImageModeling(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Union[str, Any] = model(lowercase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Dict = DeiTForMaskedImageModeling(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCAmelCase : List[str] = model(lowercase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = self.type_sequence_label_size
__UpperCAmelCase : Optional[Any] = DeiTForImageClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : List[Any] = DeiTForImageClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCAmelCase : int = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_lowerCAmelCase : Tuple = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = False
def A( self):
__UpperCAmelCase : Dict = DeiTModelTester(self)
__UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(lowercase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear))
def A( self):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(lowercase__)
__UpperCAmelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : str = [*signature.parameters.keys()]
__UpperCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__=False):
__UpperCAmelCase : List[str] = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A( self):
if not self.model_tester.is_training:
return
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase__)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__UpperCAmelCase : Union[str, Any] = model_class(lowercase__)
model.to(lowercase__)
model.train()
__UpperCAmelCase : Optional[int] = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
__UpperCAmelCase : Optional[int] = model(**lowercase__).loss
loss.backward()
def A( self):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase__) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__UpperCAmelCase : List[str] = model_class(lowercase__)
model.gradient_checkpointing_enable()
model.to(lowercase__)
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
__UpperCAmelCase : Optional[Any] = model(**lowercase__).loss
loss.backward()
def A( self):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : str = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase__),
*get_values(lowercase__),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
__UpperCAmelCase : Optional[int] = problem_type['''title''']
__UpperCAmelCase : List[Any] = problem_type['''num_labels''']
__UpperCAmelCase : Any = model_class(lowercase__)
model.to(lowercase__)
model.train()
__UpperCAmelCase : str = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
if problem_type["num_labels"] > 1:
__UpperCAmelCase : Optional[int] = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
__UpperCAmelCase : int = inputs['''labels'''].to(problem_type['''dtype'''])
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase__) as warning_list:
__UpperCAmelCase : Optional[Any] = model(**lowercase__).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def A( self):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[Any] = DeiTModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def prepare_img() -> Tuple:
'''simple docstring'''
__UpperCAmelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')
if is_vision_available()
else None
)
@slow
def A( self):
__UpperCAmelCase : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''').to(
lowercase__)
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**lowercase__)
# verify the logits
__UpperCAmelCase : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : List[str] = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def A( self):
__UpperCAmelCase : str = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''')
__UpperCAmelCase : Tuple = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=lowercase__ , return_tensors='''pt''')
__UpperCAmelCase : int = inputs.pixel_values.to(lowercase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
__UpperCAmelCase : Any = model(lowercase__)
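# Added note: the fp16 / device_map='''auto''' check above only asserts that a forward
# pass completes with torch.float16 weights on GPU; no numerical outputs are compared
# against a reference here.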
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , items ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( config ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def __SCREAMING_SNAKE_CASE ( tmp_path_factory , monkeypatch ) -> Dict:
'''simple docstring'''
test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
test_hf_modules_cache = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(test_hf_datasets_cache ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(test_hf_metrics_cache ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(test_hf_modules_cache ) )
test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(test_downloaded_datasets_path ) )
test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(test_extracted_datasets_path ) )
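# Added note: the autouse fixture above re-points every datasets cache and module
# path at a pytest tmp directory, so the test suite never reads from or pollutes the
# real Hugging Face cache on the machine running the tests.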
@pytest.fixture(autouse=True , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def __SCREAMING_SNAKE_CASE ( monkeypatch ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( monkeypatch ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
| 675 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key( state_dict , old , new ) -> int:
'''simple docstring'''
val = state_dict.pop(old )
state_dict[new] = val
def rename_backbone_keys( state_dict ) -> int:
'''simple docstring'''
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
new_state_dict[new_key] = value
else:
new_state_dict[key] = value
return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ) -> Union[str, Any]:
'''simple docstring'''
prefix = ''''''
if is_panoptic:
prefix = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
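# Added note: the fused in_proj matrix is (3 * 256) x 256 for d_model = 256, so rows
# [0:256], [256:512] and [512:768] (written [-256:] above) hold the query, key and
# value projections respectively.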
def prepare_img() -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ) -> Union[str, Any]:
'''simple docstring'''
config = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
config.backbone = '''resnet101'''
if "dc5" in model_name:
config.dilation = True
is_panoptic = '''panoptic''' in model_name
if is_panoptic:
config.num_labels = 250
else:
config.num_labels = 91
repo_id = '''huggingface/label-files'''
filename = '''coco-detection-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# load image processor
format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
image_processor = ConditionalDetrImageProcessor(format=format )
# prepare image
img = prepare_img()
encoding = image_processor(images=img , return_tensors='''pt''' )
pixel_values = encoding['''pixel_values''']
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''' , model_name , pretrained=True ).eval()
state_dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
src = '''conditional_detr.''' + src
rename_key(state_dict , src , dest )
state_dict = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
prefix = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__UpperCAmelCase : Union[str, Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase : Tuple = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__UpperCAmelCase : Dict = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__UpperCAmelCase : List[Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
# finally, create HuggingFace model and load state dict
model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
model.load_state_dict(state_dict )
model.eval()
model.push_to_hub(repo_id=model_name , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
original_outputs = conditional_detr(pixel_values )
outputs = model(pixel_values )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 675 |
def generate_large_matrix() -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( grid ) -> None:
'''simple docstring'''
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ) -> int:
'''simple docstring'''
left = 0
right = len(array ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
mid = (left + right) // 2
num = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
left = mid + 1
else:
right = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(array )
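# Added worked example: for a descending row such as [4, 2, -1, -3] the search above
# returns 2, the index of the first negative value; for an all-non-negative row it
# falls through and returns len(array).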
def count_negatives_binary_search( grid ) -> int:
'''simple docstring'''
total = 0
bound = len(grid[0] )
for i in range(len(grid ) ):
bound = find_negative_index(grid[i][:bound] )
total += bound
return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ) -> int:
'''simple docstring'''
total = 0
for row in grid:
for i, number in enumerate(row ):
if number < 0:
total += len(row ) - i
break
return total
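# Added sanity-check sketch (not part of the original file): the three counting
# strategies above should agree on every grid, e.g.
# for test_grid in test_grids:
#     assert (
#         count_negatives_binary_search(test_grid)
#         == count_negatives_brute_force(test_grid)
#         == count_negatives_brute_force_with_break(test_grid)
#     )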
def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
setup = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( vectors , noofclusters ) -> Optional[int]:
'''simple docstring'''
noofclusters = int(noofclusters )
assert noofclusters < len(vectors )
# Find out the dimensionality
dim = len(vectors[0] )
# Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors ) ) )
shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder('''float64''' , [dim] )
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid , centroid_value ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder('''int32''' )
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment , assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
mean_input = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input , 0 )
##Node for computing Euclidean distances
# Placeholders for input
va = tf.placeholder('''float''' , [dim] )
vb = tf.placeholder('''float''' , [dim] )
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
centroid_distances = tf.placeholder('''float''' , [noofclusters] )
cluster_assignment = tf.argmin(centroid_distances , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
init_op = tf.initialize_all_variables()
# Initialize all variables
sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
noofiterations = 100
for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(vectors ) ):
vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
distances = [
sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
assignment = sess.run(
cluster_assignment , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters ):
# Collect all the vectors assigned to this cluster
assigned_vects = [
vectors[i]
for i in range(len(vectors ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
new_location = sess.run(
mean_op , feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
centroids = sess.run(centroids )
assignments = sess.run(assignments )
return centroids, assignments
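# Hedged usage sketch (added; requires the legacy TF1 graph/session API used above,
# and keeps the snippet's obfuscated function name):
# vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.9, 8.2]]
# centroids, assignments = __SCREAMING_SNAKE_CASE(vectors, 2)
# expected: two centroids near (1.1, 0.9) and (7.95, 8.1); each assignment is 0 or 1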
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCAmelCase = HfArgumentParser(InitializationArguments)
lowerCAmelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCAmelCase = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
lowerCAmelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCAmelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
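# Hedged CLI sketch (added; assumes the flag names defined by the codeparrot
# InitializationArguments dataclass, which is not shown in this file):
# python initialize_model.py --config_name gpt2-large \
#     --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model --push_to_hub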
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
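# Added note: left padding is required for batched decoder-only generation because
# new tokens are appended at the sequence end, so shorter prompts must be padded on
# the left; the assertions above check that the padded batch decodes to the same
# strings as the per-sentence (non-padded) runs.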
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 1 |
from math import isqrt, log2
def calculate_prime_numbers( max_number ) -> list[int]:
'''simple docstring'''
is_prime = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , max_number , i ):
is_prime[j] = False
return [i for i in range(2 , max_number ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( lowercase_ = 800800 , lowercase_ = 800800 ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = degree * loga(lowercase_ )
__UpperCAmelCase : List[Any] = int(lowercase_ )
__UpperCAmelCase : int = calculate_prime_numbers(lowercase_ )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'{solution() = }')
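    # Sanity check (illustrative addition, not part of the original solution):
    # with base = degree = 4 the bound is 4 * log2(4) = 8, and the only prime
    # pair (p, q), p < q, with q*log2(p) + p*log2(q) <= 8 is (2, 3) -- indeed
    # 2**3 * 3**2 = 72 <= 4**4 = 256 -- so the counter should report exactly 1.
    assert solution(4, 4) == 1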
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
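
# Usage sketch (illustrative, not part of the original module): the defaults
# above match the bert-base architecture, so a custom config only overrides
# the fields that differ, e.g. a thinner 6-layer encoder:
#
#     config = BertConfig(num_hidden_layers=6, num_attention_heads=8, hidden_size=512)
#     onnx_config = BertOnnxConfig(config)  # assumes OnnxConfig's default constructor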
| 675 | 1 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality; volume is taken in litres."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure in atm from volume (L), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume in litres from pressure (atm), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature in kelvin from pressure (atm), moles and volume (L)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
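    # Worked example (illustrative addition): 1 mol of an ideal gas at
    # 273.15 K in 22.4 L exerts about 1 atm, since 1 * 0.0821 * 273.15 / 22.4
    # is roughly 1.001, which rounds to 1.
    assert moles_to_pressure(volume=22.4, moles=1, temperature=273.15) == 1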
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-means clustering of `vectors` using the TensorFlow 1.x graph/session API."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
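

# Usage sketch (illustrative; assumes the pre-1.0 TensorFlow API used above,
# where tf.Session, tf.placeholder, tf.sub and tf.initialize_all_variables
# still exist):
#
#     data = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.2]]
#     centroids, assignments = TFKMeansCluster(data, noofclusters=2)
#     # Expect the first two points in one cluster and the last two in the other.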
| 675 | 1 |