code (stringlengths 82-54.1k) | code_codestyle (int64 0-699) | style_context (stringlengths 111-35.6k) | style_context_codestyle (int64 0-699) | label (int64 0-1)
---|---|---|---|---
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main():
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            # Preserve the case of the original letter
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance to the next key letter, wrapping around at the end of the key
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-alphabetic symbols are passed through unchanged
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
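# A minimal, non-interactive usage sketch for the Vigenere helpers above (function names as
# reconstructed in this sample; the key must consist of letters A-Z):
ciphertext = encrypt_message('LEMON', 'Attack at dawn!')
assert decrypt_message('LEMON', ciphertext) == 'Attack at dawn!'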
| 20 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs (their XOR is negative).
    >>> different_signs(1, -1)
    True
    """
    return num1 ^ num2 < 0

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 21 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace formula: c = sqrt(bulk_modulus / density)
    if density <= 0:
        raise ValueError('''Impossible fluid density''')
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''')
    return (bulk_modulus / density) ** 0.5

if __name__ == "__main__":
    import doctest
    doctest.testmod()
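# Rough sanity check for the Newton-Laplace helper above (approximate literature values, used
# for illustration only): water has a bulk modulus of about 2.15e9 Pa and a density of about
# 998 kg/m^3, which gives a speed of sound of roughly 1.47e3 m/s.
assert 1400 < speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) < 1550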
| 49 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = tempfile.mkdtemp()
_a = BlipImageProcessor()
_a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
_a = BlipaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def __lowerCAmelCase ( self : Tuple , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_a = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = self.prepare_image_inputs()
_a = image_processor(lowerCAmelCase_ , return_tensors='''np''' )
_a = processor(images=lowerCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = processor(text=lowerCAmelCase_ )
_a = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(lowerCAmelCase_ )
_a = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 22 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    # Keep track of visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod
    testmod()
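# Quick checks of the cycle detector above on two tiny directed graphs (function name as
# reconstructed in this sample):
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0 is a cycle
assert check_cycle({0: [1], 1: [2], 2: []}) is False   # a simple chain has no back edge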
| 49 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[Any] = logging.get_logger(__name__)
def _snake_case (__lowercase):
# initialize config
if "resnet-50" in model_name:
UpperCamelCase_ = ResNetConfig.from_pretrained('microsoft/resnet-50')
elif "resnet-101" in model_name:
UpperCamelCase_ = ResNetConfig.from_pretrained('microsoft/resnet-101')
else:
raise ValueError('Model name should include either resnet50 or resnet101')
UpperCamelCase_ = DetrConfig(use_timm_backbone=__lowercase , backbone_config=__lowercase)
# set label attributes
UpperCamelCase_ = 'panoptic' in model_name
if is_panoptic:
UpperCamelCase_ = 250
else:
UpperCamelCase_ = 91
UpperCamelCase_ = 'huggingface/label-files'
UpperCamelCase_ = 'coco-detection-id2label.json'
UpperCamelCase_ = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset') , 'r'))
UpperCamelCase_ = {int(__lowercase): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _snake_case (__lowercase):
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase_ = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight'))
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight'))
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias'))
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean'))
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var'))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
))
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias"""))
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
))
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
])
return rename_keys
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = state_dict.pop(__lowercase)
UpperCamelCase_ = val
def _snake_case (__lowercase , __lowercase=False):
UpperCamelCase_ = ''
if is_panoptic:
UpperCamelCase_ = 'detr.'
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
UpperCamelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase_ = in_proj_weight[:256, :]
UpperCamelCase_ = in_proj_bias[:256]
UpperCamelCase_ = in_proj_weight[256:512, :]
UpperCamelCase_ = in_proj_bias[256:512]
UpperCamelCase_ = in_proj_weight[-256:, :]
UpperCamelCase_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
UpperCamelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase_ = in_proj_weight[:256, :]
UpperCamelCase_ = in_proj_bias[:256]
UpperCamelCase_ = in_proj_weight[256:512, :]
UpperCamelCase_ = in_proj_bias[256:512]
UpperCamelCase_ = in_proj_weight[-256:, :]
UpperCamelCase_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase_ = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""")
UpperCamelCase_ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase_ = in_proj_weight_cross_attn[:256, :]
UpperCamelCase_ = in_proj_bias_cross_attn[:256]
UpperCamelCase_ = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase_ = in_proj_bias_cross_attn[256:512]
UpperCamelCase_ = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase_ = in_proj_bias_cross_attn[-256:]
def _snake_case ():
UpperCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase_ = Image.open(requests.get(__lowercase , stream=__lowercase).raw)
return im
@torch.no_grad()
def _snake_case (__lowercase , __lowercase=None , __lowercase=False):
UpperCamelCase_ , UpperCamelCase_ = get_detr_config(__lowercase)
# load original model from torch hub
UpperCamelCase_ = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(f"""Converting model {model_name}...""")
UpperCamelCase_ = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=__lowercase).eval()
UpperCamelCase_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__lowercase):
if is_panoptic:
UpperCamelCase_ = 'detr.' + src
rename_key(__lowercase , __lowercase , __lowercase)
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase_ = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr')
and not key.startswith('class_labels_classifier')
and not key.startswith('bbox_predictor')
):
UpperCamelCase_ = state_dict.pop(__lowercase)
UpperCamelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCamelCase_ = state_dict.pop(__lowercase)
UpperCamelCase_ = val
elif key.startswith('bbox_attention') or key.startswith('mask_head'):
continue
else:
UpperCamelCase_ = state_dict.pop(__lowercase)
UpperCamelCase_ = val
else:
if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
UpperCamelCase_ = state_dict.pop(__lowercase)
UpperCamelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCamelCase_ = DetrForSegmentation(__lowercase) if is_panoptic else DetrForObjectDetection(__lowercase)
model.load_state_dict(__lowercase)
model.eval()
# verify our conversion on an image
UpperCamelCase_ = 'coco_panoptic' if is_panoptic else 'coco_detection'
UpperCamelCase_ = DetrImageProcessor(format=__lowercase)
UpperCamelCase_ = processor(images=prepare_img() , return_tensors='pt')
UpperCamelCase_ = encoding['pixel_values']
UpperCamelCase_ = detr(__lowercase)
UpperCamelCase_ = model(__lowercase)
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3)
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3)
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
Path(__lowercase).mkdir(exist_ok=__lowercase)
model.save_pretrained(__lowercase)
processor.save_pretrained(__lowercase)
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...')
model.push_to_hub(f"""nielsr/{model_name}""")
processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
snake_case__ : List[str] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 23 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    # The root of a DFS tree is an articulation point iff it has more than one outgoing edge
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 49 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: by default, messages are logged on the
    main process only.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check whether this process should emit the log record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """
    Returns a `logging.Logger` for `name` wrapped in a `MultiProcessAdapter`. If no level is
    given, it falls back to the `ACCELERATE_LOG_LEVEL` environment variable.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
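# Hedged usage sketch for the utilities above: `get_logger` mirrors `logging.getLogger`, and the
# extra keyword arguments control multi-process behaviour (an `Accelerator` or `PartialState`
# must be created first, otherwise `log` raises the RuntimeError above).
# logger = get_logger(__name__)
# logger.info("printed on the main process only (default)")
# logger.info("printed once per process, in rank order", main_process_only=False, in_order=True)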
| 25 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = AltDiffusionPipeline
lowercase__: Tuple = TEXT_TO_IMAGE_PARAMS
lowercase__: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__: List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__: List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__snake_case : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
__snake_case : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__snake_case : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
__snake_case : Dict = CLIPTextModel(__magic_name__ )
__snake_case : Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__snake_case : str = 77
__snake_case : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Any=0 ) -> Optional[int]:
"""simple docstring"""
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : int = torch.manual_seed(__magic_name__ )
else:
__snake_case : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : List[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[Any] = self.get_dummy_components()
torch.manual_seed(0 )
__snake_case : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__snake_case : Optional[int] = RobertaSeriesModelWithTransformation(__magic_name__ )
__snake_case : Optional[Any] = text_encoder
__snake_case : Any = AltDiffusionPipeline(**__magic_name__ )
__snake_case : Any = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : int = self.get_dummy_inputs(__magic_name__ )
__snake_case : Union[str, Any] = """A photo of an astronaut"""
__snake_case : Any = alt_pipe(**__magic_name__ )
__snake_case : List[Any] = output.images
__snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : Any = self.get_dummy_components()
__snake_case : List[Any] = PNDMScheduler(skip_prk_steps=__magic_name__ )
torch.manual_seed(0 )
__snake_case : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__snake_case : Dict = RobertaSeriesModelWithTransformation(__magic_name__ )
__snake_case : Tuple = text_encoder
__snake_case : Optional[Any] = AltDiffusionPipeline(**__magic_name__ )
__snake_case : Optional[int] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Dict = self.get_dummy_inputs(__magic_name__ )
__snake_case : List[Any] = alt_pipe(**__magic_name__ )
__snake_case : Dict = output.images
__snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : str = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__magic_name__ )
__snake_case : List[str] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : int = """A painting of a squirrel eating a burger"""
__snake_case : Tuple = torch.manual_seed(0 )
__snake_case : Dict = alt_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
__snake_case : Optional[Any] = output.images
__snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : str = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
__snake_case : Dict = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__magic_name__ , safety_checker=__magic_name__ )
__snake_case : List[str] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Dict = """A painting of a squirrel eating a burger"""
__snake_case : Dict = torch.manual_seed(0 )
__snake_case : Any = alt_pipe([prompt] , generator=__magic_name__ , num_inference_steps=2 , output_type="""numpy""" )
__snake_case : List[Any] = output.images
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : Union[str, Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 26 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be turned into `b` by upper-casing some of its lowercase
    letters and deleting all of the remaining lowercase letters.
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 49 | 0 |
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer.
    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return it as a binary literal with a "0b"/"-0b" prefix.
    >>> main("-200")
    '-0b11001000'
    """
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return F"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 27 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
A_ = """CompVis/stable-diffusion-v1-1"""
A_ = """CompVis/stable-diffusion-v1-2"""
A_ = """CompVis/stable-diffusion-v1-3"""
A_ = """CompVis/stable-diffusion-v1-4"""
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ):
        super().__init__()
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = StableDiffusionPipeline(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , requires_safety_checker=UpperCAmelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self ):
return {k: getattr(self , UpperCAmelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self , UpperCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.enable_attention_slicing(UpperCAmelCase )
@torch.no_grad()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
@torch.no_grad()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
@torch.no_grad()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
@torch.no_grad()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
@torch.no_grad()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
lowerCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(UpperCAmelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
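# A minimal usage sketch (illustrative, not part of the class above): upstream this
# comparison pipeline ships as a diffusers community pipeline, so it is typically loaded
# via `custom_pipeline`; the checkpoint id and pipeline name below are assumptions.
if __name__ == "__main__":
    demo_pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    )
    demo_output = demo_pipe("a red sports car on a mountain road", num_inference_steps=25)
    print(len(demo_output.images))  # one result per checkpoint, v1-1 ... v1-4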
| 29 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
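    # Non-interactive self-check (illustrative values).
    sample = [5, 2, 9, 1]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 5, 9]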
| 49 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__a = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=None ):
'''simple docstring'''
UpperCAmelCase_ : Dict = XLNetConfig.from_json_file(_lowercase )
UpperCAmelCase_ : Optional[int] = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
UpperCAmelCase_ : Optional[int] = finetuning_task
UpperCAmelCase_ : Optional[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCAmelCase_ : List[str] = XLNetForSequenceClassification(_lowercase )
elif "squad" in finetuning_task:
UpperCAmelCase_ : Dict = finetuning_task
UpperCAmelCase_ : List[str] = XLNetForQuestionAnswering(_lowercase )
else:
UpperCAmelCase_ : List[str] = XLNetLMHeadModel(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
UpperCAmelCase_ : int = os.path.join(_lowercase , _lowercase )
UpperCAmelCase_ : Union[str, Any] = os.path.join(_lowercase , _lowercase )
print(f'''Save PyTorch model to {os.path.abspath(_lowercase )}''' )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {os.path.abspath(_lowercase )}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
__a = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
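    # Example invocation (illustrative; the script and checkpoint paths are assumptions):
    #   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
    #       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
    #       --pytorch_dump_folder_path ./xlnet-base-cased \
    #       --finetuning_task sts-b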
| 30 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
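    # Hand-checked example (illustrative): only coins {1, 2, 5} fit into 5 pence,
    # giving 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5 -> 4 combinations.
    assert solution(5) == 4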
| 31 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate a plaintext message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse code message back into plaintext."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    """Round-trip a sample message through encrypt() and decrypt()."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
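    # Round-trip sanity check (illustrative): values can be read off the table above.
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"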
| 32 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 33 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a_ :str = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
a_ :int = 'sshleifer/student_marian_en_ro_6_1'
a_ :List[str] = 'sshleifer/tiny-mbart'
@require_torch
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : List[str] , _lowercase : Dict=False , _lowercase : Optional[Any]=None , _lowercase : Dict=True , _lowercase : str=True , _lowercase : Optional[Any]=True , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Any = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
SCREAMING_SNAKE_CASE__ : List[str] = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [log for log in logs if '''eval_loss''' in log.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
SCREAMING_SNAKE_CASE__ : Optional[Any] = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowercase__ ( self : int ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowercase__ ( self : Optional[int] ):
self.run_seqaseq_quick(distributed=_lowercase )
@require_torch_multi_gpu
def lowercase__ ( self : Any ):
self.run_seqaseq_quick(distributed=_lowercase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : str ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : List[Any] ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : int ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : Any ):
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase )
@require_apex
@require_torch_gpu
def lowercase__ ( self : Any ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def lowercase__ ( self : Optional[int] , _lowercase : Optional[int] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
SCREAMING_SNAKE_CASE__ : Dict = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = experiments[experiment_id]
SCREAMING_SNAKE_CASE__ : str = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
SCREAMING_SNAKE_CASE__ : int = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''] )
SCREAMING_SNAKE_CASE__ : Tuple = len(re.findall(_lowercase , cl.err ) )
self.assertEqual(_lowercase , data['''n_matches'''] )
@slow
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''' ) ).log_history
SCREAMING_SNAKE_CASE__ : str = [log for log in logs if '''eval_loss''' in log.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = eval_metrics[0]
SCREAMING_SNAKE_CASE__ : Any = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase )
# test if do_predict saves generations and metrics
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.listdir(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {os.path.basename(_lowercase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowercase__ ( self : Dict ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase : str ) -> Tuple[int, float]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''--skip_memory_metrics 0'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.run_trainer(
max_len=1_28 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''' ) ).log_history
SCREAMING_SNAKE_CASE__ : Tuple = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
SCREAMING_SNAKE_CASE__ : str = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE__ : str = gpu_peak_mem_orig + gpu_alloc_mem_orig
SCREAMING_SNAKE_CASE__ : List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE__ : Optional[Any] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB in savings
SCREAMING_SNAKE_CASE__ : List[str] = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
_lowercase , _lowercase , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def lowercase__ ( self : str , _lowercase : int , _lowercase : str , _lowercase : int , _lowercase : float = 3E-3 , _lowercase : str = "adafactor" , _lowercase : bool = False , _lowercase : str = None , _lowercase : int = 0 , _lowercase : bool = True , _lowercase : bool = True , _lowercase : bool = True , _lowercase : bool = True , _lowercase : int = None , ):
SCREAMING_SNAKE_CASE__ : Any = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
SCREAMING_SNAKE_CASE__ : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(_lowercase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(_lowercase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(_lowercase )}
""".split()
SCREAMING_SNAKE_CASE__ : List[Any] = '''
--do_predict
'''.split()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
SCREAMING_SNAKE_CASE__ : str = get_gpu_count()
SCREAMING_SNAKE_CASE__ : int = get_torch_dist_unique_port()
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
SCREAMING_SNAKE_CASE__ : int = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env() )
else:
SCREAMING_SNAKE_CASE__ : int = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase ):
main()
return output_dir
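# A minimal standalone sketch of the optimizer-memory arithmetic behind the BNB assertions
# above. The 25M quantizable-parameter figure and the 8-vs-2 bytes per parameter come from
# the comments in the test, not from inspecting the model, so treat the numbers as
# illustrative rather than exact.
def expected_optimizer_state_savings_mb(n_params: int = 25_000_000, bytes_full: int = 8, bytes_bnb: int = 2) -> float:
    """Roughly 143MB for 25M params, which is why the assertion above uses a 120MB margin."""
    return n_params * (bytes_full - bytes_bnb) / 2**20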
| 35 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
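# A small self-contained sketch of the special-token layout the two methods above produce.
# It uses dummy token ids (cls=2, sep=3) instead of a real sentencepiece vocabulary, so the
# id values are placeholders, not the model's actual special-token ids.
def demo_special_token_layout():
    cls, sep = [2], [3]
    pair_input = cls + [10, 11] + sep + [20] + sep  # [CLS] A [SEP] B [SEP]
    pair_type_ids = len(cls + [10, 11] + sep) * [0] + len([20] + sep) * [1]
    assert pair_input == [2, 10, 11, 3, 20, 3]
    assert pair_type_ids == [0, 0, 0, 0, 1, 1]
    return pair_input, pair_type_ids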
| 49 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq OPT checkpoint and normalize its state dict for the HF model."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint and save it in the HF format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowercase : Union[str, Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
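# A tiny standalone illustration of the K/V/Q split performed in load_checkpoint above. It
# uses a random tensor as a stand-in for a fused "*.qkv_proj.weight", so the sizes are
# illustrative only; call it manually to sanity-check the reshaping.
def demo_qkv_split(hidden_size: int = 4) -> None:
    fused = torch.randn(3 * hidden_size, hidden_size)
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)  # metaseq packs K, V, Q
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)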
| 36 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
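# The _LazyModule object above defers importing the torch/vision-heavy submodules until one
# of the exported names is actually accessed. A minimal, generic sketch of that idea using a
# module-level __getattr__ (PEP 562) is shown below as comments only -- it illustrates the
# pattern and is not the actual _LazyModule implementation:
#
#   import importlib
#
#   _import_structure = {"configuration_vivit": ["VivitConfig"]}
#
#   def __getattr__(name):
#       for submodule, exported in _import_structure.items():
#           if name in exported:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")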
| 49 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    # pick a representative linear layer so the tests can inspect its (quantized) weight class
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like low-rank adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
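# A small usage sketch for the adapter class above (illustrative only -- the tests below
# attach it to quantized attention projections rather than a plain nn.Linear). It is not
# executed on import; call it manually.
def demo_lora_wrap():
    base = nn.Linear(8, 8)
    wrapped = LoRALayer(base, rank=4)
    x = torch.randn(2, 8)
    # the second adapter linear is zero-initialised, so the wrapped output starts equal to the base output
    assert torch.allclose(wrapped(x), base(x))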
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
_lowercase = 'bigscience/bloom-1b7'
# Constant values
_lowercase = 2.1_09_65_95_52_69_25_74
_lowercase = 'Hello my name is'
_lowercase = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
_lowercase = 1_0
def _UpperCamelCase( self : Dict ):
# Models and tokenizer
a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gpt2-xl'
_lowercase = 3.31_91_85_48_54_15_21_87
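# A minimal end-to-end sketch of the 4-bit loading pattern exercised by these tests. It needs
# a CUDA GPU plus the bitsandbytes/accelerate extras, so it is not executed on import; the
# checkpoint name is the same small BLOOM model used above, and the config values are just
# one reasonable choice rather than the only supported one.
def demo_load_4bit(model_name: str = "bigscience/bloom-560m"):
    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    return AutoModelForCausalLM.from_pretrained(
        model_name, quantization_config=quantization_config, device_map="auto"
    )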
| 37 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
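# Example invocation (assuming this script is saved as train.py; the data files, config path
# and output directory below are placeholders, while the flags themselves are all defined in
# the argument parser above):
#
#   python train.py \
#     --student_type distilbert \
#     --student_config student_config.json \
#     --teacher_type bert \
#     --teacher_name bert-base-uncased \
#     --mlm \
#     --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle \
#     --dump_path serialization_dir/my_first_distillation \
#     --force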
| 49 | 0 |
'''simple docstring'''
import os
import sys
import unittest
A_ : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A_ : Dict = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
A_ : str = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {"""BertModelTest""": """BertModelTester"""}
snake_case__ : List[str] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
snake_case__ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
snake_case__ : Optional[Any] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Tuple ) ->Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : Dict ) ->List[str]:
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger '''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCamelCase )
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = generator.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def snake_case__( self : List[str] ) ->Tuple:
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger '''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
snake_case_ = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
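# A small, self-contained sketch of the output-comparison pattern used above (max absolute
# difference plus torch.allclose). The tensors here are random stand-ins for model outputs,
# so it only demonstrates the checking logic, not the actual conversion.
def demo_output_comparison(atol: float = 1e-3) -> bool:
    reference = torch.randn(1, 4)
    converted = reference + 1e-4 * torch.randn(1, 4)
    max_diff = torch.max(torch.abs(converted - reference)).item()
    print("Maximum absolute difference between outputs: {:.6f}".format(max_diff))
    return torch.allclose(reference, converted, atol=atol)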
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
lowerCAmelCase__ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ = f'down_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ = f'down_blocks.{i}.attentions.{j}.'
lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ = f'up_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ = f'up_blocks.{i}.attentions.{j}.'
lowerCAmelCase__ = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.conv.'
lowerCAmelCase__ = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
lowerCAmelCase__ = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ = '''mid_block.attentions.0.'''
lowerCAmelCase__ = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ = f'mid_block.resnets.{j}.'
lowerCAmelCase__ = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _A ( A__ ):
"""simple docstring"""
__lowercase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
__lowercase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
__lowercase = v.replace(A__ , A__ )
__lowercase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
__lowercase = v.replace(A__ , A__ )
__lowercase = v
__lowercase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ = f'encoder.down_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.'
lowerCAmelCase__ = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
lowerCAmelCase__ = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ = f'decoder.up_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ = f'mid_block.resnets.{i}.'
lowerCAmelCase__ = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def _A ( A__ ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def _A ( A__ ):
"""simple docstring"""
__lowercase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
__lowercase = v.replace(A__ , A__ )
__lowercase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
__lowercase = v.replace(A__ , A__ )
__lowercase = v
__lowercase = {v: vae_state_dict[k] for k, v in mapping.items()}
__lowercase = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"mid.attn_1.{weight_name}.weight" in k:
print(F"Reshaping {k} for SD format" )
__lowercase = reshape_weight_for_sd(A__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCAmelCase__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ = {'''q''': 0, '''k''': 1, '''v''': 2}
def _A ( A__ ):
"""simple docstring"""
__lowercase = {}
__lowercase = {}
__lowercase = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
__lowercase = k[: -len('''.q_proj.weight''' )]
__lowercase = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
__lowercase = [None, None, None]
__lowercase = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
__lowercase = k[: -len('''.q_proj.bias''' )]
__lowercase = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
__lowercase = [None, None, None]
__lowercase = v
continue
        __lowercase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
__lowercase = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        __lowercase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
__lowercase = torch.cat(A__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        __lowercase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
__lowercase = torch.cat(A__ )
return new_state_dict
def _A ( A__ ):
"""simple docstring"""
    return A__
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
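# Illustrative invocation of this conversion script (the file name and paths below are
# placeholders, not values taken from the repository); it reads a Diffusers-format model
# directory and writes a single original-Stable-Diffusion checkpoint:
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half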
| 41 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
    return [json.loads(line ) for line in snake_case_]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = "bert-base-cased" ) -> List[Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = load_dataset('glue' ,'mrpc' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCamelCase_ = DataLoader(
tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
model.eval()
lowerCamelCase_ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
lowerCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
lowerCamelCase_ = metric.compute()
return eval_metric["accuracy"]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]:
# Initialize accelerator
lowerCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config['lr']
lowerCamelCase_ = int(config['num_epochs'] )
lowerCamelCase_ = int(config['seed'] )
lowerCamelCase_ = int(config['batch_size'] )
lowerCamelCase_ = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCamelCase_ = 1
lowerCamelCase_ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCamelCase_ = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase_ = 0
lowerCamelCase_ = evaluate.load('glue' ,'mrpc' )
lowerCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ = args.resume_from_checkpoint.split('epoch_' )[1]
lowerCamelCase_ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase_ = int(__UpperCamelCase ) + 1
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
accelerator.print('resumed checkpoint performance:' ,__UpperCamelCase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
lowerCamelCase_ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase_ = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.loss
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase_ = f'''epoch_{epoch}'''
lowerCamelCase_ = os.path.join(args.output_dir ,__UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = accuracy
lowerCamelCase_ = lr_scheduler.get_lr()[0]
lowerCamelCase_ = optimizer.param_groups[0]['lr']
lowerCamelCase_ = epoch
lowerCamelCase_ = overall_step
accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=__UpperCamelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__UpperCamelCase ,)
parser.add_argument(
'--output_dir' ,type=__UpperCamelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=__UpperCamelCase ,default=2 ,help='Number of train epochs.' ,)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
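# Illustrative launch commands for this script (the file name and directories are
# placeholders); the first run trains a single epoch and saves a checkpoint, the second
# resumes from it and verifies the restored optimizer/scheduler state:
#   accelerate launch test_checkpointing.py --output_dir ./ckpts --partial_train_epoch 1
#   accelerate launch test_checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0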
| 42 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 0 |
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ) -> float:
    """Evaluate a polynomial, given as coefficients from lowest to highest power, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] , x : float ) -> float:
    """Evaluate the same polynomial at x with Horner's rule, one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
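# Worked example for the two evaluators above: poly = (0.0, 0.0, 5.0, 9.3, 7.0) encodes
# 5*x**2 + 9.3*x**3 + 7*x**4, so at x = 10.0 both calls print 500 + 9300 + 70000 = 79800.0.
# Horner's rule reaches the same value as ((((7)*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0,
# avoiding the repeated computation of powers of x.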
| 43 |
"""simple docstring"""
def speed_of_sound_in_a_fluid( density :float , bulk_modulus :float ):
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
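# Quick sanity check for the formula above (approximate textbook values, not part of the
# original module): water has bulk_modulus ~ 2.15e9 Pa and density ~ 998 kg/m^3, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~ 1468 m/s, close to the
# commonly quoted ~1480 m/s for water at room temperature.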
| 49 | 0 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase__ :
    def __init__( self : Optional[int] ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self : List[Any] ):
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
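# Example run (assuming the fixed implementation above): circularly convolving
# [2, 1, 2, -1] with [1, 2, 3, 4] gives [10, 10, 6, 14], e.g.:
#   conv = UpperCAmelCase__()
#   print(conv.circular_convolution()) # -> [10, 10, 6, 14]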
| 44 |
"""simple docstring"""
def check_cycle( graph :dict ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph :dict , vertex :int , visited :set , rec_stk :set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
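# Example (using the functions above): a directed graph with a back edge has a cycle,
# one without does not:
#   check_cycle({0: [1], 1: [2], 2: [0]}) # -> True (0 -> 1 -> 2 -> 0)
#   check_cycle({0: [1], 1: [2], 2: []}) # -> False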
| 49 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
UpperCamelCase = "bert-base-cased"
UpperCamelCase = "fp16"
UpperCamelCase = "bf16"
UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCAmelCase_ ( AccelerateTestCase ):
"""simple docstring"""
def __a ( self :List[str] ):
super().setUp()
UpperCamelCase__ :str = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def __a ( self :Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :List[Any] = f"""{i + 1}"""
UpperCamelCase__ :List[Any] = strategy
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __a ( self :Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :Optional[int] = prefetch_policy
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Dict = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :Tuple = state_dict_type
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = AutoModel.from_pretrained(lowerCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :int = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCamelCase__ :Optional[Any] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
UpperCamelCase__ :Union[str, Any] = """2000"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :int = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :str = """TRANSFORMER_BASED_WRAP"""
UpperCamelCase__ :Union[str, Any] = """T5Layer"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Any = FullyShardedDataParallelPlugin()
with self.assertRaises(lowerCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
UpperCamelCase__ :Dict = self.dist_env.copy()
UpperCamelCase__ :int = """SIZE_BASED_WRAP"""
UpperCamelCase__ :Union[str, Any] = """0"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
UpperCamelCase__ :Dict = self.dist_env.copy()
UpperCamelCase__ :Dict = mp_dtype
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = Accelerator()
if mp_dtype == "fp16":
UpperCamelCase__ :Tuple = torch.floataa
elif mp_dtype == "bf16":
UpperCamelCase__ :Tuple = torch.bfloataa
UpperCamelCase__ :int = MixedPrecision(param_dtype=lowerCamelCase__ , reduce_dtype=lowerCamelCase__ , buffer_dtype=lowerCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowerCamelCase__ )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCamelCase__ :List[str] = self.dist_env.copy()
UpperCamelCase__ :Dict = str(lowerCamelCase__ ).lower()
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCAmelCase_ ( TempDirTestCase ):
"""simple docstring"""
def __a ( self :Dict ):
super().setUp()
UpperCamelCase__ :str = 0.82
UpperCamelCase__ :int = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
UpperCamelCase__ :int = {
"""multi_gpu_fp16""": 32_00,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
UpperCamelCase__ :Optional[Any] = 1_60
UpperCamelCase__ :List[str] = 1_60
UpperCamelCase__ :Union[str, Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase__ :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def __a ( self :str ):
UpperCamelCase__ :int = os.path.join(self.test_scripts_folder , """test_performance.py""" )
UpperCamelCase__ :List[str] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
UpperCamelCase__ :Optional[Any] = cmd.copy()
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def __a ( self :str ):
UpperCamelCase__ :List[Any] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
UpperCamelCase__ :Any = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
UpperCamelCase__ :Optional[int] = len(lowerCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
UpperCamelCase__ :Tuple = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
UpperCamelCase__ :List[Any] = cmd_config[:-1]
UpperCamelCase__ :Tuple = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
UpperCamelCase__ :Optional[int] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
UpperCamelCase__ :Optional[int] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 0 |
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
_lowerCAmelCase : str = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = get_test_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = get_test_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Dict = {"BertModelTest": "BertModelTester"}
_lowerCamelCase : Union[str, Any] = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = get_model_to_test_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = get_model_to_test_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_lowerCamelCase : Optional[Any] = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = get_model_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Dict = get_model_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_lowerCamelCase : List[str] = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
| 46 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Dict ): # noqa: E741
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
__UpperCAmelCase = [False] * n
def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase = True
__UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase = True
else:
__UpperCAmelCase = min(low[at] , snake_case_ )
return out_edge_count
for i in range(snake_case_ ):
if not visited[i]:
__UpperCAmelCase = 0
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ )
__UpperCAmelCase = out_edge_count > 1
for x in range(len(snake_case_ ) ):
if is_art[x] is True:
print(snake_case_ )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
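# For the adjacency list above, the articulation points printed should be
# 2, 3 and 5: removing any one of these vertices disconnects the graph.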
| 49 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product of AB and AC is the zero vector.
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
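# Minimal usage sketch (using the names as reconstructed above): three points
# on the line x = y = z are collinear; moving the last point off the line is not.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # expected: True
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3)))  # expected: False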
| 47 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 0 |
"""Ideal gas law: P * V = n * R * T."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure (Pa) of `moles` mol of an ideal gas at `kelvin` K in `volume` m^3."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume (m^3) of `moles` mol of an ideal gas at `kelvin` K under `pressure` Pa."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
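# Usage sketch (function names are reconstructions, see above): 1 mol of an
# ideal gas at 300 K in 0.025 m^3 exerts roughly 1e5 Pa.
# >>> round(pressure_of_gas_system(1, 300, 0.025))
# 99774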
| 48 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
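# Usage sketch: "daBcd" can be abbreviated to "ABC" (capitalize 'a' and 'c',
# delete the two 'd's), while "dBcd" cannot.
# >>> abbr("daBcd", "ABC")
# True
# >>> abbr("dBcd", "ABC")
# False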
| 49 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """ZinengTang/tvlt-base"""
lowerCamelCase__ = tempfile.mkdtemp()
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return TvltImageProcessor.from_pretrained(self.checkpoint ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor ,_lowerCAmelCase )
self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase )
lowerCamelCase__ = np.ones([1_20_00] )
lowerCamelCase__ = feature_extractor(_lowerCAmelCase ,return_tensors="""np""" )
lowerCamelCase__ = processor(audio=_lowerCAmelCase ,return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase )
lowerCamelCase__ = np.ones([3, 2_24, 2_24] )
lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase )
lowerCamelCase__ = np.ones([1_20_00] )
lowerCamelCase__ = np.ones([3, 2_24, 2_24] )
lowerCamelCase__ = processor(audio=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" ,)
| 50 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
"""Equivalent resistance of resistors connected in parallel or in series."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
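# Usage sketch: two 10-ohm resistors.
# >>> resistor_parallel([10, 10])
# 5.0
# >>> resistor_series([10, 10])
# 20.0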
| 51 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
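# Non-interactive usage sketch:
# >>> data = [5, 3, 1, 4]
# >>> rec_insertion_sort(data, len(data))
# >>> data
# [1, 3, 4, 5]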
| 49 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_snake_case : Any = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
a_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
a_ = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowercase ( self : List[Any] ) -> Dict:
__lowerCAmelCase = self.task_name.lower()
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """train"""
a_ = """dev"""
a_ = """test"""
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
a_ = 42
a_ = 42
def __init__( self : Any , lowerCAmelCase_ : GlueDataTrainingArguments , lowerCAmelCase_ : PreTrainedTokenizerBase , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Union[str, Split] = Split.train , lowerCAmelCase_ : Optional[str] = None , ) -> List[str]:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , lowerCAmelCase_ , )
__lowerCAmelCase = args
__lowerCAmelCase = glue_processors[args.task_name]()
__lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
__lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowerCAmelCase , __lowerCAmelCase = label_list[2], label_list[1]
__lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + '.lock'
with FileLock(lowerCAmelCase_ ):
if os.path.exists(lowerCAmelCase_ ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(lowerCAmelCase_ )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowerCAmelCase = examples[:limit_length]
__lowerCAmelCase = glue_convert_examples_to_features(
lowerCAmelCase_ , lowerCAmelCase_ , max_length=args.max_seq_length , label_list=lowerCAmelCase_ , output_mode=self.output_mode , )
__lowerCAmelCase = time.time()
torch.save(self.features , lowerCAmelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Dict ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> InputFeatures:
return self.features[i]
def lowercase ( self : Any ) -> Dict:
return self.label_list
| 53 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 54 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "swin2sr"
snake_case_ = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] ,A : str=64 ,A : Union[str, Any]=1 ,A : List[Any]=3 ,A : Dict=1_80 ,A : List[str]=[6, 6, 6, 6, 6, 6] ,A : Any=[6, 6, 6, 6, 6, 6] ,A : int=8 ,A : Dict=2.0 ,A : List[str]=True ,A : Dict=0.0 ,A : Tuple=0.0 ,A : Dict=0.1 ,A : List[Any]="gelu" ,A : int=False ,A : Optional[Any]=0.02 ,A : str=1E-5 ,A : List[Any]=2 ,A : Union[str, Any]=1.0 ,A : Any="1conv" ,A : Optional[int]="pixelshuffle" ,**A : Union[str, Any] ,):
super().__init__(**A )
__A = image_size
__A = patch_size
__A = num_channels
__A = embed_dim
__A = depths
__A = len(A )
__A = num_heads
__A = window_size
__A = mlp_ratio
__A = qkv_bias
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = drop_path_rate
__A = hidden_act
__A = use_absolute_embeddings
__A = layer_norm_eps
__A = initializer_range
__A = upscale
__A = img_range
__A = resi_connection
__A = upsampler
| 55 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : Any = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["ViTFeatureExtractor"]
_a : Tuple = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
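            # merge dense and sparse hits, dropping duplicate passages while keeping order, then truncate to the top 10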
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
A_ : Dict = TypeVar('T')
class _lowerCAmelCase( Generic[T] ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
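        # array-backed segment tree, 1-indexed: st[1] is the root and the N leaves live at positions N..2N-1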
UpperCamelCase_: Any | T = None
UpperCamelCase_: int = len(_lowerCamelCase )
UpperCamelCase_: list[T] = [any_type for _ in range(self.N )] + arr
UpperCamelCase_: str = fnc
self.build()
def _a ( self ):
for p in range(self.N - 1 , 0 , -1 ):
UpperCamelCase_: Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
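        # point update: overwrite the leaf at position p + N, then recompute every ancestor up to the root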
p += self.N
UpperCamelCase_: str = v
while p > 1:
UpperCamelCase_: Union[str, Any] = p // 2
UpperCamelCase_: Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def _a ( self , _lowerCamelCase , _lowerCamelCase ): # noqa: E741
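        # inclusive range query on [l, r]: shift both endpoints to leaf positions and fold in boundary nodes while halving the interval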
UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = l + self.N, r + self.N
UpperCamelCase_: T | None = None
while l <= r:
if l % 2 == 1:
UpperCamelCase_: Optional[Any] = self.st[l] if res is None else self.fn(_lowerCamelCase , self.st[l] )
if r % 2 == 0:
UpperCamelCase_: Union[str, Any] = self.st[r] if res is None else self.fn(_lowerCamelCase , self.st[r] )
UpperCamelCase_ ,UpperCamelCase_: Tuple = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
A_ : Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
A_ : Optional[int] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
A_ : str = SegmentTree(test_array, min)
A_ : Tuple = SegmentTree(test_array, max)
A_ : int = SegmentTree(test_array, lambda a, b: a + b)
def snake_case () -> None:
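    # brute-force validation: every (i, j) range result must match reduce() over the raw slice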
for i in range(len(UpperCAmelCase__ ) ):
for j in range(UpperCAmelCase__ , len(UpperCAmelCase__ ) ):
UpperCamelCase_: List[Any] = reduce(UpperCAmelCase__ , test_array[i : j + 1] )
UpperCamelCase_: Any = reduce(UpperCAmelCase__ , test_array[i : j + 1] )
UpperCamelCase_: Optional[int] = reduce(lambda UpperCAmelCase__ , UpperCAmelCase__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
assert max_range == max_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
assert sum_range == sum_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
test_all_segments()
for index, value in test_updates.items():
A_ : Any = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 57 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
snake_case_ : Tuple = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__UpperCamelCase )
DownloadCommand.register_subcommand(__UpperCamelCase )
EnvironmentCommand.register_subcommand(__UpperCamelCase )
RunCommand.register_subcommand(__UpperCamelCase )
ServeCommand.register_subcommand(__UpperCamelCase )
UserCommands.register_subcommand(__UpperCamelCase )
AddNewModelCommand.register_subcommand(__UpperCamelCase )
AddNewModelLikeCommand.register_subcommand(__UpperCamelCase )
LfsCommands.register_subcommand(__UpperCamelCase )
PTtoTFCommand.register_subcommand(__UpperCamelCase )
# Let's go
snake_case_ : Union[str, Any] = parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
snake_case_ : List[Any] = args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 58 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
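        # single sequence: [CLS] X [SEP] ; sequence pair: [CLS] A [SEP] B [SEP]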
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 | 0 |
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: int =DonutProcessor.from_pretrained(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
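        # round-trip check: parsing the tag sequence back should reproduce the expected dict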
lowerCamelCase__: Any ={
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
lowerCamelCase__: Tuple =(
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
lowerCamelCase__: Optional[int] =self.processor.tokenajson(UpperCAmelCase_)
self.assertDictEqual(UpperCAmelCase_ , UpperCAmelCase_)
| 59 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Dict = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_UpperCamelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_UpperCamelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_UpperCamelCase )
return parser.parse_args()
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = parse_args()
# Import training_script as a module.
snake_case_ : str = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ : str = script_fpath.stem
snake_case_ : Optional[int] = importlib.import_module(_UpperCamelCase )
# Patch sys.argv
snake_case_ : Any = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 60 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
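    # cross-checks the MLM/CLM flags, loss weights, and student/teacher architecture pairs before training starts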
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
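    # freeze the student's positional embeddings (used for RoBERTa and GPT-2 students)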
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 0 |
def _A ( lowerCAmelCase_ : int = 1000 ):
"""simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , lowerCAmelCase_ + 1 ) )
if __name__ == "__main__":
print(solution())
| 61 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
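    # counts the paths from (row, col) to the bottom-right cell over free cells (value 0), moving in the four cardinal directions without revisiting a cell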
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ), len(grid[0] )
if (
min(lowercase , lowercase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
count += depth_first_search(lowercase , row + 1 , lowercase , lowercase )
count += depth_first_search(lowercase , row - 1 , lowercase , lowercase )
count += depth_first_search(lowercase , lowercase , col + 1 , lowercase )
count += depth_first_search(lowercase , lowercase , col - 1 , lowercase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ : Tuple = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
__a = ["input_values", "attention_mask"]
def __init__( self , lowerCAmelCase = 1 , lowerCAmelCase = 16000 , lowerCAmelCase = 0.0 , lowerCAmelCase = False , lowerCAmelCase = 80 , lowerCAmelCase = 16 , lowerCAmelCase = 64 , lowerCAmelCase = "hann_window" , lowerCAmelCase = 1.0 , lowerCAmelCase = 80 , lowerCAmelCase = 7600 , lowerCAmelCase = 1e-10 , lowerCAmelCase = 2 , lowerCAmelCase = True , **lowerCAmelCase , ) -> str:
super().__init__(feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= do_normalize
SCREAMING_SNAKE_CASE__: Optional[Any]= return_attention_mask
SCREAMING_SNAKE_CASE__: Optional[int]= num_mel_bins
SCREAMING_SNAKE_CASE__: Union[str, Any]= hop_length
SCREAMING_SNAKE_CASE__: Optional[int]= win_length
SCREAMING_SNAKE_CASE__: Dict= win_function
SCREAMING_SNAKE_CASE__: str= frame_signal_scale
SCREAMING_SNAKE_CASE__: Optional[int]= fmin
SCREAMING_SNAKE_CASE__: Any= fmax
SCREAMING_SNAKE_CASE__: Union[str, Any]= mel_floor
SCREAMING_SNAKE_CASE__: Tuple= reduction_factor
SCREAMING_SNAKE_CASE__: Dict= win_length * sampling_rate // 1000
SCREAMING_SNAKE_CASE__: int= hop_length * sampling_rate // 1000
SCREAMING_SNAKE_CASE__: List[str]= optimal_fft_length(self.sample_size )
SCREAMING_SNAKE_CASE__: List[Any]= (self.n_fft // 2) + 1
SCREAMING_SNAKE_CASE__: List[str]= window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 ) -> List[np.ndarray]:
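        # normalize each utterance to zero mean / unit variance; padded positions are set to padding_value when an attention mask is given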
if attention_mask is not None:
SCREAMING_SNAKE_CASE__: Any= np.array(lowerCAmelCase , np.intaa )
SCREAMING_SNAKE_CASE__: Optional[Any]= []
for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE__: str= (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE__: Optional[int]= padding_value
normed_input_values.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: List[str]= [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCamelCase_ ( self , lowerCAmelCase , ) -> np.ndarray:
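        # extract a log-mel spectrogram and transpose it to (frames, num_mel_bins)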
SCREAMING_SNAKE_CASE__: Tuple= spectrogram(
lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
SCREAMING_SNAKE_CASE__: str= self._process_audio(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
else:
SCREAMING_SNAKE_CASE__: int= None
if audio_target is not None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self._process_audio(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
if inputs is None:
return inputs_target
else:
SCREAMING_SNAKE_CASE__: Tuple= inputs_target['''input_values''']
SCREAMING_SNAKE_CASE__: List[str]= inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE__: Dict= decoder_attention_mask
return inputs
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature:
SCREAMING_SNAKE_CASE__: Tuple= isinstance(lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
SCREAMING_SNAKE_CASE__: int= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__: Optional[int]= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
SCREAMING_SNAKE_CASE__: Dict= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__: int= speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__: List[str]= [speech]
# needed to make pad() work on spectrogram inputs
SCREAMING_SNAKE_CASE__: List[str]= self.feature_size
# convert into correct format for padding
if is_target:
SCREAMING_SNAKE_CASE__: List[str]= [self._extract_mel_features(lowerCAmelCase ) for waveform in speech]
SCREAMING_SNAKE_CASE__: int= BatchFeature({'''input_values''': features} )
SCREAMING_SNAKE_CASE__: str= self.num_mel_bins
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= BatchFeature({'''input_values''': speech} )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pad(
lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: str= feature_size_hack
# convert input values to correct format
SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
SCREAMING_SNAKE_CASE__: Dict= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
SCREAMING_SNAKE_CASE__: List[str]= [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__: Dict= input_values.astype(np.floataa )
# convert attention_mask to correct format
SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
SCREAMING_SNAKE_CASE__: List[str]= (
attention_mask
if self._get_padding_strategies(lowerCAmelCase , max_length=lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE__: Any= self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=lowerCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__: List[str]= padded_inputs.convert_to_tensors(lowerCAmelCase )
return padded_inputs
def UpperCamelCase_ ( self ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE__: Tuple= super().to_dict()
# Don't serialize these as they are derived from the other properties.
SCREAMING_SNAKE_CASE__: List[str]= ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 64 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)
def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = CTRLTokenizer
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase__ : Optional[int] = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase__ : int = {"""unk_token""": """<unk>"""}
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def __lowercase ( self : int ,**A : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : List[Any] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = """adapt react readapt apt"""
UpperCAmelCase__ : Any = """adapt react readapt apt"""
return input_text, output_text
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase__ : Tuple = """adapt react readapt apt"""
UpperCAmelCase__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase__ : Dict = tokenizer.tokenize(A )
self.assertListEqual(A ,A )
UpperCAmelCase__ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,A )
| 65 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
UpperCamelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
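    # Follow the dotted attribute path in `key` down the HF model, check that the shapes agree,
    # and copy `value` into the selected slot (weight, weight_g, weight_v or bias).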
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[Any] = value
elif weight_type == "weight_g":
_lowercase : Optional[Any] = value
elif weight_type == "weight_v":
_lowercase : Any = value
elif weight_type == "bias":
_lowercase : Any = value
else:
_lowercase : int = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
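    # Route every tensor in the fairseq state dict either to the conv feature extractor / adapter loaders,
    # or, via the MAPPING table above, to the matching parameter of the HF Wav2Vec2 encoder.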
_lowercase : Any = []
_lowercase : int = fairseq_model.state_dict()
_lowercase : str = hf_model.feature_extractor
_lowercase : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
_lowercase : Any = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : Tuple = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowercase : int = True
if "*" in mapped_key:
_lowercase : Tuple = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Optional[int] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Tuple = 'weight_g'
elif "weight_v" in name:
_lowercase : List[str] = 'weight_v'
elif "bias" in name:
_lowercase : Optional[int] = 'bias'
elif "weight" in name:
_lowercase : Tuple = 'weight'
else:
_lowercase : int = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : int = full_name.split('conv_layers.' )[-1]
_lowercase : Dict = name.split('.' )
_lowercase : List[Any] = int(items[0] )
_lowercase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
_lowercase : Dict = full_name.split('adaptor.' )[-1]
_lowercase : Optional[int] = name.split('.' )
if items[1].isdigit():
_lowercase : Any = int(items[1] )
else:
_lowercase : Any = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
_lowercase : Any = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
_lowercase : str = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
_lowercase : int = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
_lowercase : Optional[Any] = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
_lowercase : Union[str, Any] = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
_lowercase : Optional[int] = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
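    # Build a linear layer whose weight is copied from the given embedding matrix (used to tie the LM head).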
_lowercase , _lowercase : Tuple = emb.weight.shape
_lowercase : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
_lowercase : int = emb.weight.data
return lin_layer
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
_lowercase : List[str] = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , add_adapter=SCREAMING_SNAKE_CASE , adapter_stride=SCREAMING_SNAKE_CASE , adapter_kernel_size=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , output_hidden_size=SCREAMING_SNAKE_CASE , )
_lowercase : Optional[Any] = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE )
# load model
_lowercase , _lowercase , _lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
_lowercase : int = model[0].eval()
# load feature extractor
_lowercase : int = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
_lowercase : str = WavaVecaModel(SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
# load decoder weights
_lowercase : Any = MBartForCausalLM(SCREAMING_SNAKE_CASE )
_lowercase , _lowercase : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_lowercase : List[Any] = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
_lowercase : int = False
_lowercase : Tuple = MBartaaTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = hf_wavavec.config.to_dict()
_lowercase : List[str] = tokenizer.pad_token_id
_lowercase : List[Any] = tokenizer.bos_token_id
_lowercase : List[str] = tokenizer.eos_token_id
_lowercase : int = 'mbart50'
_lowercase : Tuple = 'wav2vec2'
_lowercase : Optional[Any] = tokenizer.eos_token_id
_lowercase : Optional[Any] = 250_004
_lowercase : Dict = tokenizer.eos_token_id
_lowercase : int = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
UpperCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 66 |
"""simple docstring"""
def lowercase__ ( density: float , bulk_modulus: float ) -> float:
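    # Newton-Laplace formula: speed = sqrt(bulk_modulus / density).
    # Sanity check with rough values for water: (2.15e9 / 1000) ** 0.5 is about 1466 m/s.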
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
snake_case = HfArgumentParser(InitializationArguments)
snake_case = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
snake_case = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
snake_case = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
snake_case = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 67 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 array of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask and force the last position of each row to 1."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 68 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    k = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_1 = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def __UpperCAmelCase ( _UpperCAmelCase : dict , *, _UpperCAmelCase : int , _UpperCAmelCase : bool ) -> Optional[int]:
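    # Flatten the nested T5X parameter tree and rename/transpose every tensor into the
    # state-dict layout expected by the Hugging Face T5 implementation.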
__snake_case = traverse_util.flatten_dict(variables["target"] )
__snake_case = {"/".join(_UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__snake_case = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , _UpperCAmelCase )
__snake_case = collections.OrderedDict()
# Shared embeddings.
__snake_case = old["token_embedder/embedding"]
# Encoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , "encoder" , "pre_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , "encoder" , "attention" )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 1 (MLP).
__snake_case = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , "encoder" , "pre_mlp_layer_norm" )
__snake_case , __snake_case = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , "encoder" , _UpperCAmelCase )
__snake_case = layer_norm
if split_mlp_wi:
__snake_case = wi[0].T
__snake_case = wi[1].T
else:
__snake_case = wi.T
__snake_case = wo.T
__snake_case = old[
"encoder/relpos_bias/rel_embedding"
].T
__snake_case = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , "self_attention" )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 1 (Cross Attention).
__snake_case = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , "encoder_decoder_attention" )
__snake_case = layer_norm
__snake_case = k.T
__snake_case = o.T
__snake_case = q.T
__snake_case = v.T
# Block i, layer 2 (MLP).
__snake_case = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , "pre_mlp_layer_norm" )
__snake_case , __snake_case = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , "decoder" , _UpperCAmelCase )
__snake_case = layer_norm
if split_mlp_wi:
__snake_case = wi[0].T
__snake_case = wi[1].T
else:
__snake_case = wi.T
__snake_case = wo.T
__snake_case = old["decoder/decoder_norm/scale"]
__snake_case = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__snake_case = old["decoder/logits_dense/kernel"].T
return new
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : bool ) -> Optional[int]:
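    # Convert the numpy arrays to torch tensors and backfill the embedding / lm_head entries
    # that older checkpoints derive from the shared token embedding.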
__snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__snake_case = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__snake_case = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__snake_case = state_dict["shared.weight"]
return state_dict
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Dict:
__snake_case = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__snake_case = convert_tax_to_pytorch(_UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=_UpperCAmelCase )
__snake_case = make_state_dict(_UpperCAmelCase , _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ) -> Optional[Any]:
__snake_case = TaConfig.from_json_file(_UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__snake_case = TaEncoderModel(_UpperCAmelCase )
else:
__snake_case = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCAmelCase )
print("Done" )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
    '''--is_encoder_only''', action='''store_true''', help='''Whether the model is an encoder-only model.''', default=False
)
a : Dict = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
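# --- Hedged aside (not part of the original converter): the repeated `.T` above reflects an
# assumed layout difference: T5X/Flax dense kernels are stored as (in_features, out_features),
# while `torch.nn.Linear.weight` is (out_features, in_features). A minimal self-contained check
# of that convention, with made-up shapes:
import numpy as np
import torch

tax_kernel = np.random.randn(512, 64).astype(np.float32)  # hypothetical (in_features, out_features) kernel
linear = torch.nn.Linear(512, 64, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tax_kernel.T.copy()))  # transpose into (out_features, in_features)
x = torch.randn(2, 512)
assert torch.allclose(linear(x), torch.from_numpy(x.numpy() @ tax_kernel), atol=1e-3)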
| 69 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Dict ): # noqa: E741
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
__UpperCAmelCase = [False] * n
def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase = True
__UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase = True
else:
__UpperCAmelCase = min(low[at] , snake_case_ )
return out_edge_count
for i in range(snake_case_ ):
if not visited[i]:
__UpperCAmelCase = 0
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ )
__UpperCAmelCase = out_edge_count > 1
for x in range(len(snake_case_ ) ):
if is_art[x] is True:
print(snake_case_ )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
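# --- Hedged aside: the articulation-point routine above is hard to follow after the identifier
# renaming (all DFS parameters share one name, and the final call does not match the definition),
# so here is a small self-contained restatement of the same low-link idea. All names below are
# mine, not taken from the record.
def articulation_points(graph):
    """Return the vertices whose removal disconnects the undirected graph."""
    n = len(graph)
    visited = [False] * n
    order = [0] * n
    low = [0] * n
    is_art = [False] * n
    timer = 0

    def dfs(at, parent):
        nonlocal timer
        visited[at] = True
        order[at] = low[at] = timer
        timer += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], order[to])
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if parent != -1 and low[to] >= order[at]:
                    is_art[at] = True
                children += 1
        if parent == -1 and children > 1:
            is_art[at] = True

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v, flag in enumerate(is_art) if flag]


demo_graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
print(articulation_points(demo_graph))  # [2, 3, 5] for the adjacency list used above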
| 49 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
lowerCamelCase : Tuple = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
lowerCamelCase : Any = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
with open(lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = collections.OrderedDict()
lowerCamelCase_ = collections.OrderedDict()
lowerCamelCase_ = collections.OrderedDict()
with open(lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase_ = f.readlines()
lowerCamelCase_ = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowercase ):
lowerCamelCase_ = b
lowerCamelCase_ = idx
for wd in b:
lowerCamelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : str , A_ : Any , A_ : Any , A_ : Optional[Any]="<|endoftext|>" , A_ : Any="<|endoftext|>" , A_ : Optional[int]="<|startoftext|>" , A_ : Union[str, Any]="<|endoftext|>" , A_ : Any=False , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , )
if not os.path.isfile(A_ ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(A_ ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
lowerCamelCase_ = do_clean_text
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = load_vocab_and_emoji(A_ , A_ )
lowerCamelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def a__ ( self : Optional[Any] , A_ : str ) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text )
def a__ ( self : Optional[int] , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def a__ ( self : Union[str, Any] , A_ : Union[str, Any] ) -> int:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(A_ )
def a__ ( self : Optional[int] , A_ : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ ).strip()
return out_string
def a__ ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
return input_ids
def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCamelCase_ = 0
if os.path.isdir(A_ ):
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCamelCase_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCamelCase_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(','.join(A_ ) + '\n' )
index += 1
with open(A_ , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , A_ )
return vocab_file, emoji_file
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , A_ : Union[str, Any] , A_ : int , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = vocab # same as swe
lowerCamelCase_ = ids_to_tokens # same as bpe
lowerCamelCase_ = emoji
lowerCamelCase_ = np.max([len(A_ ) for w in self.vocab.keys()] )
lowerCamelCase_ = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCamelCase_ = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCamelCase_ = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCamelCase_ = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCamelCase_ = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCamelCase_ = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCamelCase_ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCamelCase_ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCamelCase_ = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : str ) -> Optional[int]:
"""simple docstring"""
return len(self.ids_to_tokens )
def a__ ( self : Union[str, Any] , A_ : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.content_repattera.sub('<URL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<EMAIL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<TEL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<PRICE>' , A_ )
lowerCamelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCamelCase_ = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def a__ ( self : int , A_ : Optional[Any] , A_ : Tuple=False ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = text.replace(' ' , '<SP>' )
lowerCamelCase_ = text.replace(' ' , '<SP>' )
lowerCamelCase_ = text.replace('\r\n' , '<BR>' )
lowerCamelCase_ = text.replace('\n' , '<BR>' )
lowerCamelCase_ = text.replace('\r' , '<BR>' )
lowerCamelCase_ = text.replace('\t' , '<TAB>' )
lowerCamelCase_ = text.replace('—' , 'ー' )
lowerCamelCase_ = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCamelCase_ = text.replace(A_ , A_ )
if clean:
lowerCamelCase_ = self.clean_text(A_ )
def check_simbol(A_ : Union[str, Any] ):
lowerCamelCase_ = x.encode()
if len(A_ ) == 1 and len(A_ ) == 2:
lowerCamelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(A_ : Tuple ):
lowerCamelCase_ = x.encode()
if len(A_ ) == 1 and len(A_ ) == 3:
lowerCamelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
lowerCamelCase_ = 0
lowerCamelCase_ = []
while pos < len(A_ ):
lowerCamelCase_ = min(len(A_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
lowerCamelCase_ = [] # (token_id, token, pos)
for e in range(A_ , A_ , -1 ):
lowerCamelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(A_ ) > 2:
lowerCamelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(A_ ) > 0:
# the smallest token_id is adopted
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sorted(A_ , key=lambda A_ : x[0] )[0]
result.append(A_ )
lowerCamelCase_ = e
else:
lowerCamelCase_ = pos + 1
lowerCamelCase_ = text[pos:end]
if check_simbol(A_ ):
result.append('<KIGOU>' )
elif checkuae(A_ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
lowerCamelCase_ = end
return result
def a__ ( self : List[Any] , A_ : Tuple , A_ : List[str]="\n" ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(A_ ) > 0:
words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) )
lowerCamelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(A_ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(A_ )
if len(A_ ) > 0:
words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) )
lowerCamelCase_ = ''.join(A_ )
return text
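# --- Hedged aside: a minimal usage sketch of the tokenizer defined above, assuming the public
# `transformers` API and the `abeja/gpt-neox-japanese-2.7b` checkpoint referenced in the vocab map
# (requires network access; illustrative only, not part of the original file).
from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
text = "こんにちは、世界"
tokens = tokenizer.tokenize(text)              # sub-word split performed by SubWordJapaneseTokenizer
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(tokenizer.decode(ids))                   # should round-trip back to the input, modulo normalization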
| 70 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =StableDiffusionPanoramaPipeline
__A : Tuple =TEXT_TO_IMAGE_PARAMS
__A : Dict =TEXT_TO_IMAGE_BATCH_PARAMS
__A : Union[str, Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
__A : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler()
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Dict = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
UpperCAmelCase_ : int = torch.manual_seed(_snake_case )
UpperCAmelCase_ : Optional[Any] = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = StableDiffusionPanoramaPipeline(**_snake_case )
UpperCAmelCase_ : Dict = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = sd_pipe(**_snake_case ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25E-3 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = StableDiffusionPanoramaPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = "french fries"
UpperCAmelCase_ : Tuple = sd_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Dict = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = StableDiffusionPanoramaPipeline(**_snake_case )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ,view_batch_size=2 )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Any = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : List[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" )
UpperCAmelCase_ : str = StableDiffusionPanoramaPipeline(**_snake_case )
UpperCAmelCase_ : Dict = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Union[str, Any] = sd_pipe(**_snake_case ).images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = PNDMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,skip_prk_steps=_snake_case )
UpperCAmelCase_ : Tuple = StableDiffusionPanoramaPipeline(**_snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : str = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe(**_snake_case ).images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case=0 ):
UpperCAmelCase_ : Any = torch.manual_seed(_snake_case )
UpperCAmelCase_ : List[Any] = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "stabilityai/stable-diffusion-2-base"
UpperCAmelCase_ : int = DDIMScheduler.from_pretrained(_snake_case ,subfolder="scheduler" )
UpperCAmelCase_ : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case ,scheduler=_snake_case ,safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
UpperCAmelCase_ : Any = self.get_inputs()
UpperCAmelCase_ : List[str] = pipe(**_snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
UpperCAmelCase_ : Optional[int] = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" ,safety_checker=_snake_case )
UpperCAmelCase_ : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
UpperCAmelCase_ : str = self.get_inputs()
UpperCAmelCase_ : Optional[Any] = pipe(**_snake_case ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
UpperCAmelCase_ : str = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = 0
def callback_fn(_snake_case ,_snake_case ,_snake_case ) -> None:
UpperCAmelCase_ : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
UpperCAmelCase_ : Tuple = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase_ : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
UpperCAmelCase_ : Optional[int] = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = "stabilityai/stable-diffusion-2-base"
UpperCAmelCase_ : Optional[Any] = DDIMScheduler.from_pretrained(_snake_case ,subfolder="scheduler" )
UpperCAmelCase_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case ,scheduler=_snake_case ,safety_checker=_snake_case )
UpperCAmelCase_ : int = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
UpperCAmelCase_ : Optional[int] = self.get_inputs()
pipe(**_snake_case ,callback=_snake_case ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ : Dict = "stabilityai/stable-diffusion-2-base"
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler.from_pretrained(_snake_case ,subfolder="scheduler" )
UpperCAmelCase_ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case ,scheduler=_snake_case ,safety_checker=_snake_case )
UpperCAmelCase_ : Tuple = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Union[str, Any] = self.get_inputs()
UpperCAmelCase_ : Dict = pipe(**_snake_case )
UpperCAmelCase_ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
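# --- Hedged aside: outside the test harness, the pipeline exercised above is typically driven like
# this (assumes a CUDA GPU, the `stabilityai/stable-diffusion-2-base` weights and the public
# diffusers API; treat it as an illustrative sketch rather than part of the test file).
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")
# Panorama generation denoises overlapping latent views and fuses them into one wide image.
image = pipe("a photo of the dolomites", height=512, width=2048).images[0]
image.save("panorama.png")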
| 71 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
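# --- Hedged aside: the DP above is the classic "abbreviation" check -- can string `a` become `b`
# by upper-casing some of its lowercase letters and deleting the remaining lowercase ones? The
# renamed definition is kept intact above; this is the same recurrence under readable names, with
# two worked examples.
def matches_abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # use a[i] (upper-cased) to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True      # drop the lowercase a[i]
    return dp[n][m]


print(matches_abbreviation("daBcd", "ABC"))  # True: upper-case a and c, keep B, drop both d's
print(matches_abbreviation("dBcd", "ABC"))   # False: nothing can produce the leading 'A'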
| 49 | 0 |
'''simple docstring'''
import math
class __magic_name__ :
def _A( self , snake_case_ , snake_case_ ):
lowercase =0.0
lowercase =0.0
for i in range(len(snake_case_ ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
for i in range(len(snake_case_ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowercase =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowercase =SelfOrganizingMap()
lowercase =3
lowercase =0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
lowercase =training_samples[j]
# Compute the winning vector
lowercase =self_organizing_map.get_winner(lowercase_ , lowercase_ )
# Update the winning vector
lowercase =self_organizing_map.update(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# classify test sample
lowercase =[0, 0, 0, 1]
lowercase =self_organizing_map.get_winner(lowercase_ , lowercase_ )
# results
print(f'Clusters that the test sample belongs to : {winner}' )
print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
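# --- Hedged aside: in the class above both squared distances end up under the same renamed
# identifier, which obscures the winner-take-all comparison. The intended rule (pick the closer of
# the two weight vectors, then pull it towards the sample) reads like this; names are mine.
def get_winner(weights, sample):
    d0 = sum((s - w) ** 2 for s, w in zip(sample, weights[0]))
    d1 = sum((s - w) ** 2 for s, w in zip(sample, weights[1]))
    return 0 if d0 <= d1 else 1


def update(weights, sample, j, alpha):
    for i in range(len(sample)):
        weights[j][i] += alpha * (sample[i] - weights[j][i])
    return weights


w = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
s = [1, 1, 0, 0]
winner = get_winner(w, s)
print(winner, update(w, s, winner, alpha=0.5))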
| 72 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
from __future__ import annotations
class _snake_case :
def __init__( self , a) -> None:
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def lowerCamelCase__ (_UpperCAmelCase): # In Order traversal of the tree
if tree:
display(tree.left)
print(tree.data)
display(tree.right)
def lowerCamelCase__ (_UpperCAmelCase):
return 1 + max(depth_of_tree(tree.left) , depth_of_tree(tree.right)) if tree else 0
def lowerCamelCase__ (_UpperCAmelCase):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
else:
return not tree.left and not tree.right
def lowerCamelCase__ (): # Main function for testing.
SCREAMING_SNAKE_CASE = Node(1)
SCREAMING_SNAKE_CASE = Node(2)
SCREAMING_SNAKE_CASE = Node(3)
SCREAMING_SNAKE_CASE = Node(4)
SCREAMING_SNAKE_CASE = Node(5)
SCREAMING_SNAKE_CASE = Node(6)
SCREAMING_SNAKE_CASE = Node(7)
SCREAMING_SNAKE_CASE = Node(8)
SCREAMING_SNAKE_CASE = Node(9)
print(is_full_binary_tree(_UpperCAmelCase))
print(depth_of_tree(_UpperCAmelCase))
print('Tree is: ')
display(_UpperCAmelCase)
if __name__ == "__main__":
main()
| 73 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''funnel'''
lowerCAmelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Dict , _A : Any=3_0522 , _A : Tuple=[4, 4, 4] , _A : Optional[Any]=None , _A : int=2 , _A : Any=768 , _A : str=12 , _A : Any=64 , _A : Union[str, Any]=3072 , _A : Any="gelu_new" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[Any]=0.0 , _A : int=0.1 , _A : Optional[int]=None , _A : Tuple=1e-9 , _A : Optional[Any]="mean" , _A : Dict="relative_shift" , _A : int=True , _A : List[str]=True , _A : List[Any]=True , **_A : List[Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = block_sizes
__SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(_A ) if block_repeats is None else block_repeats
assert len(_A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : int = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout
__SCREAMING_SNAKE_CASE : List[str] = attention_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = initializer_std
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__SCREAMING_SNAKE_CASE : int = attention_type
__SCREAMING_SNAKE_CASE : Dict = separate_cls
__SCREAMING_SNAKE_CASE : Optional[int] = truncate_seq
__SCREAMING_SNAKE_CASE : Any = pool_q_only
super().__init__(**_A )
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCAmelCase__ ( self : Dict , _A : List[Any] ):
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
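# --- Hedged aside: the two derived properties above come straight from `block_sizes`. Assuming the
# public `transformers.FunnelConfig` (of which the class above is a renamed copy), a quick check:
from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[2, 2, 2])
print(config.num_hidden_layers)  # 6 -> sum(block_sizes)
print(config.num_blocks)         # 3 -> len(block_sizes)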
| 74 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
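# --- Hedged aside: both helpers above were renamed to the same identifier, so the recursive calls
# no longer resolve against the record; the same algorithm under readable names, with a quick
# worked example instead of the interactive prompt.
def insert_next(collection, index):
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)


def rec_insertion_sort(collection, n):
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


numbers = [5, 3, 8, 1, 2]
rec_insertion_sort(numbers, len(numbers))
print(numbers)  # [1, 2, 3, 5, 8]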
| 49 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
a_ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
a_ = [0, 2_5, 5_0]
a_ = [2_5, 5_0, 7_5]
a_ = fuzz.membership.trimf(X, abca)
a_ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
a_ = np.ones(7_5)
a_ = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
a_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
a_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
a_ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
a_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
a_ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
a_ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
a_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
a_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 76 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
    # Create the data structures and fill in the initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
    __UpperCAmelCase : Dict = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__UpperCAmelCase : Union[str, Any] = n - k
# Calculate C(n,k)
for i in range(UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1)
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
if n < 0:
raise ValueError("factorial() not defined for negative values" )
__UpperCAmelCase : Optional[Any] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase )
if __name__ == "__main__":
A = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 77 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: List[str] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : str = ReformerTokenizer
a__ : Optional[Any] = ReformerTokenizerFast
a__ : Any = True
a__ : Union[str, Any] = False
a__ : int = True
def _lowercase (self : List[str] ):
super().setUp()
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : int ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__a ) , 1000 )
def _lowercase (self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase (self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] , __a : List[str]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def _lowercase (self : Optional[Any] ):
pass
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowercase (self : Optional[int] ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def _lowercase (self : Tuple ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = " ".join(__a )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(__a , return_tensors="pt" )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
UpperCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ = encoded_sequence["input_ids"].shape
UpperCAmelCase_ = ReformerModel(__a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def _lowercase (self : Dict ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__a , sequences=__a , )
| 78 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
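    # Load the dense question retriever (RetriBERT) and the BART / T5 sequence-to-sequence answer generator.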
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
UpperCAmelCase__ : Any = data
UpperCAmelCase__ : List[Any] = [0X6745_2301, 0Xefcd_ab89, 0X98ba_dcfe, 0X1032_5476, 0Xc3d2_e1f0]
@staticmethod
def __UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
return ((n << b) | (n >> (32 - b))) & 0Xffff_ffff
def __UpperCAmelCase ( self ):
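        # SHA-1 preprocessing: append a single 1 bit, zero-pad, then append the message length as a 64-bit big-endian integer.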
UpperCAmelCase__ : List[Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase__ : Optional[int] = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCAmelCase ( self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCAmelCase ( self , _lowerCAmelCase ):
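        # Expand the 16 32-bit words of a block into the 80-word SHA-1 message schedule.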
UpperCAmelCase__ : Dict = list(struct.unpack(""">16L""" , _lowerCAmelCase ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase__ : Optional[int] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCAmelCase ( self ):
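        # Run the 80-round SHA-1 compression function over every 512-bit block, accumulating into the five state words.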
UpperCAmelCase__ : List[str] = self.padding()
UpperCAmelCase__ : List[str] = self.split_blocks()
for block in self.blocks:
UpperCAmelCase__ : Tuple = self.expand_block(_lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase__ : Optional[int] = (b & c) | ((~b) & d)
UpperCAmelCase__ : int = 0X5a82_7999
elif 20 <= i < 40:
UpperCAmelCase__ : Tuple = b ^ c ^ d
UpperCAmelCase__ : int = 0X6ed9_eba1
elif 40 <= i < 60:
UpperCAmelCase__ : List[str] = (b & c) | (b & d) | (c & d)
UpperCAmelCase__ : Tuple = 0X8f1b_bcdc
elif 60 <= i < 80:
UpperCAmelCase__ : int = b ^ c ^ d
UpperCAmelCase__ : str = 0Xca62_c1d6
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (
self.rotate(_lowerCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0Xffff_ffff,
a,
self.rotate(_lowerCAmelCase , 30 ),
c,
d,
)
UpperCAmelCase__ : int = (
self.h[0] + a & 0Xffff_ffff,
self.h[1] + b & 0Xffff_ffff,
self.h[2] + c & 0Xffff_ffff,
self.h[3] + d & 0Xffff_ffff,
self.h[4] + e & 0Xffff_ffff,
)
return ("{:08x}" * 5).format(*self.h )
def _lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = B"""Test String"""
assert SHAaHash(__lowerCamelCase ).final_hash() == hashlib.shaa(__lowerCamelCase ).hexdigest() # noqa: S324
def _lowerCamelCase ( ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
UpperCAmelCase__ : str = parser.parse_args()
UpperCAmelCase__ : Union[str, Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
UpperCAmelCase__ : List[Any] = f.read()
else:
UpperCAmelCase__ : int = bytes(__lowerCamelCase , """utf-8""" )
print(SHAaHash(__lowerCamelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 79 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
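        # Build a tiny UNet / VAE / CLIP text-encoder stack so the pipeline test runs quickly on CPU.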
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
def snake_case ( ):
'''simple docstring'''
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def snake_case ( lowerCamelCase ):
'''simple docstring'''
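    # Count divisors from the prime factorisation: the total is the product of (multiplicity + 1) over all prime factors.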
__lowercase = 1
__lowercase = 2
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def snake_case ( ):
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(lowerCamelCase ) > 500 )
if __name__ == "__main__":
print(solution())
| 80 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 | 0 |
def lowerCAmelCase_ ( ):
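    # Project Euler problem 9: product a * b * c of the Pythagorean triplet with a + b + c == 1000.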
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
for b in range(__lowerCamelCase , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCamelCase = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
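    # Sanity-check the CLI arguments: loss weights, student/teacher type combinations and required files.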
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
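    # Parse the command-line arguments and prepare the tokenizer, dataset, student and teacher models for distillation.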
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
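# A minimal sketch (not this script's actual implementation) of how the
# --mlm_mask_prop / --word_mask / --word_keep / --word_rand proportions above are
# typically applied: ~15% of positions are selected for prediction, and of those
# 80% get [MASK], 10% keep their token, 10% get a random token. Names are illustrative.
import torch

def mask_tokens_sketch(input_ids, mask_token_id, vocab_size,
                       mlm_mask_prop=0.15, word_mask=0.8, word_keep=0.1, word_rand=0.1):
    labels = input_ids.clone()
    pred_mask = torch.rand(input_ids.shape) < mlm_mask_prop  # positions we will predict
    labels[~pred_mask] = -100  # ignored by the cross-entropy loss
    buckets = torch.multinomial(
        torch.tensor([word_mask, word_keep, word_rand]), int(pred_mask.sum()), replacement=True
    )
    selected = input_ids[pred_mask].clone()
    selected[buckets == 0] = mask_token_id                      # 80%: replace by [MASK]
    n_rand = int((buckets == 2).sum())
    selected[buckets == 2] = torch.randint(vocab_size, (n_rand,))  # 10%: random token
    masked = input_ids.clone()
    masked[pred_mask] = selected                                 # remaining 10%: kept as-is
    return masked, labels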
| 49 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase__ = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class __snake_case ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
_lowerCamelCase : Any = FlaxBertModel(__lowerCAmelCase )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
_lowerCamelCase : Union[str, Any] = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
_lowerCamelCase : List[Any] = flatten_dict(unfreeze(model.params ) )
_lowerCamelCase : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_lowerCamelCase : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
_lowerCamelCase : Tuple = flatten_dict(unfreeze(model.params ) )
_lowerCamelCase : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_lowerCamelCase : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
_lowerCamelCase : Optional[int] = FlaxBertModel(__lowerCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
_lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_lowerCamelCase : int = flatten_dict(unfreeze(model.params ) )
_lowerCamelCase : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_lowerCamelCase : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__lowerCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : List[Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_lowerCamelCase : Tuple = flatten_dict(unfreeze(model.params ) )
_lowerCamelCase : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_lowerCamelCase : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
def snake_case_ ( A_ : Any, A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[Any] = flatten_dict(modela.params )
_lowerCamelCase : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
_lowerCamelCase : int = False
return models_are_equal
@require_flax
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_lowerCamelCase : Any = FlaxBertModel(__lowerCAmelCase )
_lowerCamelCase : Any = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : str = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertTrue(check_models_equal(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_lowerCamelCase : Tuple = FlaxBertModel(__lowerCAmelCase )
_lowerCamelCase : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , max_shard_size='''10KB''' )
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertTrue(check_models_equal(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '''bert'''
_lowerCamelCase : Union[str, Any] = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '''bert'''
_lowerCamelCase : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : str = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
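# A readable sketch of the parameter-comparison idea used in the tests above
# (it relies on np, unfreeze and flatten_dict already imported in this file; the
# helper name is illustrative, not part of the library): flatten both parameter
# trees and require every leaf to match within a tolerance.
def params_are_close(model_a, model_b, tol=1e-4):
    flat_a = flatten_dict(unfreeze(model_a.params))
    flat_b = flatten_dict(unfreeze(model_b.params))
    if flat_a.keys() != flat_b.keys():
        return False
    return all(float(np.sum(np.abs(flat_a[key] - flat_b[key]))) <= tol for key in flat_a)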
| 83 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
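# The import structure above defers heavy submodule imports until an attribute is
# first accessed. A stripped-down sketch of the same idea with a module-level
# __getattr__ (PEP 562); this is illustrative only, not transformers' actual
# _LazyModule implementation, and uses a made-up mini structure.
import importlib

_example_structure = {'configuration_fnet': ['FNetConfig'], 'modeling_fnet': ['FNetModel']}
_attr_to_module = {attr: mod for mod, attrs in _example_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module('.' + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')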
| 49 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A_ :
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Node | None
class A_ :
'''simple docstring'''
def __init__( self , snake_case ):
lowercase = None
for i in sorted(snake_case , reverse=snake_case ):
lowercase = Node(snake_case , self.head )
def __iter__( self ):
lowercase = self.head
while node:
yield node.data
lowercase = node.next_node
def __len__( self ):
return sum(1 for _ in self )
def __str__( self ):
return " -> ".join([str(snake_case ) for node in self] )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return SortedLinkedList(list(__SCREAMING_SNAKE_CASE ) + list(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 84 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
        help='Path to the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
import os
SCREAMING_SNAKE_CASE__ : Any = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
while index < len(lowercase__ ) - 1:
SCREAMING_SNAKE_CASE__ : List[str] = SYMBOLS[numerals[index]]
SCREAMING_SNAKE_CASE__ : List[str] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ''
SCREAMING_SNAKE_CASE__ : Tuple = num // 10_00
numerals += m_count * "M"
num %= 10_00
SCREAMING_SNAKE_CASE__ : Tuple = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
SCREAMING_SNAKE_CASE__ : Dict = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _a ( lowercase__ : str = "/p089_roman.txt" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 0
with open(os.path.dirname(lowercase__ ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE__ : Optional[Any] = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE__ : Optional[int] = line.strip()
SCREAMING_SNAKE_CASE__ : Optional[int] = parse_roman_numerals(lowercase__ )
SCREAMING_SNAKE_CASE__ : int = generate_roman_numerals(lowercase__ )
savings += len(lowercase__ ) - len(lowercase__ )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Tuple ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
A_ = TextIteratorStreamer(UpperCAmelCase )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
A_ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = greedy_ids[:, input_ids.shape[1] :]
A_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_prompt=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
A_ = AutoTokenizer.from_pretrained("distilgpt2" )
A_ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = torch.ones((1, 5) , device=UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=1 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ = cs.out[:-1] # Remove the final "\n"
A_ = tokenizer(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __A ( self : int ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = TextIteratorStreamer(UpperCAmelCase , timeout=0.001 )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase ):
A_ = ""
for new_text in streamer:
streamer_text += new_text
| 86 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
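# A small illustrative round trip with the writer exercised above (the data is
# made up): each dataset row becomes one JSON line when lines=True.
def _example_json_lines_roundtrip():
    ds = Dataset.from_dict({'col_1': ['a', 'b', 'c'], 'col_2': [1, 2, 3]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        assert len(buffer.read().decode('utf-8').splitlines()) == ds.num_rows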
| 49 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
return EnvironmentCommand()
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : ArgumentParser) ->int:
'''simple docstring'''
A__ = parser.add_parser('''env''')
download_parser.set_defaults(func=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = huggingface_hub.__version__
A__ = '''not installed'''
A__ = '''NA'''
if is_torch_available():
import torch
A__ = torch.__version__
A__ = torch.cuda.is_available()
A__ = '''not installed'''
if is_transformers_available():
import transformers
A__ = transformers.__version__
A__ = '''not installed'''
if is_accelerate_available():
import accelerate
A__ = accelerate.__version__
A__ = '''not installed'''
if is_xformers_available():
import xformers
A__ = xformers.__version__
A__ = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last two points.\n''')
print(self.format_dict(UpperCAmelCase__))
return info
@staticmethod
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : Tuple) ->Tuple:
'''simple docstring'''
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 87 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( A_ ,unittest.TestCase ):
__UpperCAmelCase = BioGptTokenizer
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_lowerCamelCase : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE))))
_lowerCamelCase : List[str] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""") as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE))
with open(self.merges_file , """w""") as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : Optional[int] = """lower newer"""
_lowerCamelCase : Optional[Any] = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Dict = BioGptTokenizer(self.vocab_file , self.merges_file)
_lowerCamelCase : Dict = """lower"""
_lowerCamelCase : List[Any] = ["""low""", """er</w>"""]
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = tokens + ["""<unk>"""]
_lowerCamelCase : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : int = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
_lowerCamelCase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 88 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
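# Rough sanity check with textbook values for water (density ~998 kg/m^3,
# bulk modulus ~2.15e9 Pa): the formula above gives about 1468 m/s.
assert round((2.15E9 / 998) ** 0.5) == 1468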
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Any = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
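# A readable sketch of the same DFS-based cycle check for a directed graph given
# as an adjacency dict (the helper names here are illustrative):
def has_cycle(graph: dict) -> bool:
    visited, rec_stack = set(), set()

    def dfs(vertex) -> bool:
        visited.add(vertex)
        rec_stack.add(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in visited and dfs(neighbour):
                return True
            if neighbour in rec_stack:  # back edge: neighbour is an ancestor on the stack
                return True
        rec_stack.remove(vertex)
        return False

    return any(node not in visited and dfs(node) for node in graph)

assert has_cycle({'a': ['b'], 'b': ['a']}) is True
assert has_cycle({'a': ['b'], 'b': []}) is False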
| 49 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
__UpperCAmelCase = '''▁'''
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Dict = VOCAB_FILES_NAMES
lowercase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[str] = BigBirdTokenizer
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
lowercase__ : List[int] = []
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<unk>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[MASK]" , lowerCamelCase_="[CLS]" , **lowerCamelCase_ , ) -> Any:
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = False if not self.vocab_file else True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
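# Illustrative usage sketch (assumed checkpoint name; the class above mirrors the
# upstream BigBird fast tokenizer, so an equivalent call is expected to look like):
#   tok = BigBirdTokenizerFast.from_pretrained('google/bigbird-roberta-base')
#   tok('Hello world')['input_ids']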
| 90 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Dict ,A_ : Optional[Any]=3 ,A_ : Optional[int]=32 ,A_ : Dict=3 ,A_ : Tuple=10 ,A_ : Tuple=[8, 16, 32, 64] ,A_ : Optional[int]=[1, 1, 2, 1] ,A_ : int=True ,A_ : Dict=True ,A_ : Union[str, Any]="relu" ,A_ : Dict=3 ,A_ : Optional[int]=None ,A_ : int=["stage2", "stage3", "stage4"] ,A_ : Tuple=[2, 3, 4] ,A_ : List[str]=1 ,) -> str:
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(A_ )
A = out_features
A = out_indices
A = num_groups
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Any ,A_ : List[Any] ) -> Optional[Any]:
A = BitModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any] ,A_ : int ,A_ : Tuple ) -> Tuple:
A = self.num_labels
A = BitForImageClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Optional[Any] ) -> str:
A = BitBackbone(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A = None
A = BitBackbone(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase: List[Any] = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: Optional[int] = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
_lowerCamelCase: Any = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = BitModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
return
@unittest.skip(reason='Bit does not output attentions' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(config=A_ )
for name, module in model.named_modules():
if isinstance(A_ ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
def check_hidden_states_output(A_ : Any ,A_ : List[str] ,A_ : int ):
A = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(A_ ,A_ ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(A_ ) ,expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A = layer_type
A = True
check_hidden_states_output(A_ ,A_ ,A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(A_ ,A_ ,A_ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = BitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
A = model(**A_ )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
A = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = (BitBackbone,) if is_torch_available() else ()
_lowerCamelCase: List[str] = BitConfig
_lowerCamelCase: Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
A = BitModelTester(self )
| 91 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
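# For the sample adjacency list above, the articulation points are vertices 2, 3
# and 5 (removing any of them disconnects the graph), so compute_ap(data) is
# expected to print 2, 3 and 5, one per line.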
| 49 | 0 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    """Classical fourth-order Runge-Kutta integration of y' = f(x, y) with
    initial value y(xa) = ya and step size h, up to x_end."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
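# Illustrative usage (not part of the original module): integrating y' = y from
# x = 0 to x = 1 with step 0.1 approximates Euler's number at the final point.
#
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(y[-1])  # ~2.7182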
| 92 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _lowerCAmelCase ( a ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
lowerCAmelCase__ :Tuple = BertTokenizer.from_pretrained('bert-base-uncased' )
lowerCAmelCase__ :str = bertabert.config.encoder.vocab_size
lowerCAmelCase__ :Tuple = tokenizer.sep_token_id
lowerCAmelCase__ :int = tokenizer.cls_token_id
lowerCAmelCase__ :Any = 1_2_8
lowerCAmelCase__ :List[str] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
lowerCAmelCase__ :str = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
lowerCAmelCase__ :Optional[Any] = train_dataset.select(range(3_2 ) )
lowerCAmelCase__ :Tuple = val_dataset.select(range(1_6 ) )
lowerCAmelCase__ :str = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCAmelCase__ :int = tokenizer(batch['article'] , padding='max_length' , truncation=__UpperCAmelCase , max_length=5_1_2 )
lowerCAmelCase__ :int = tokenizer(batch['highlights'] , padding='max_length' , truncation=__UpperCAmelCase , max_length=1_2_8 )
lowerCAmelCase__ :Any = inputs.input_ids
lowerCAmelCase__ :Union[str, Any] = inputs.attention_mask
lowerCAmelCase__ :Dict = outputs.input_ids
lowerCAmelCase__ :Optional[Any] = outputs.input_ids.copy()
lowerCAmelCase__ :List[str] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
lowerCAmelCase__ :Dict = outputs.attention_mask
assert all(len(__UpperCAmelCase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(__UpperCAmelCase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = pred.label_ids
lowerCAmelCase__ :List[Any] = pred.predictions
# all unnecessary tokens are removed
lowerCAmelCase__ :Tuple = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowerCAmelCase__ :str = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
lowerCAmelCase__ :List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
lowerCAmelCase__ :str = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ :List[str] = SeqaSeqTrainingArguments(
output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='steps' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCAmelCase__ :Any = SeqaSeqTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , )
# start training
trainer.train()
| 93 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if ``a`` can be turned into ``b`` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
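# Illustrative examples (not part of the original module):
#   abbr("daBcd", "ABC")  # True: capitalise 'a' and 'c', drop the remaining lowercase letters
#   abbr("dBcd", "ABC")   # False: there is no way to produce the leading 'A'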
| 49 | 0 |
'''simple docstring'''
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass values must be greater than 0.')
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
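# Worked example (illustrative, assumed gases): Graham's law gives
# rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1). For hydrogen
# (~2.016 g/mol) compared with oxygen (~31.998 g/mol):
#   effusion_ratio(2.016, 31.998)  # ~3.984, i.e. H2 effuses about 4x faster than O2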
| 94 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCamelCase_ (__A ):
__magic_name__ = ''''''
__magic_name__ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__magic_name__ = None # compression type in fsspec. ex: "gzip"
__magic_name__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[Any] , lowerCAmelCase_ : str = "" , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[dict] = None , **lowerCAmelCase_ : List[str] ) -> List[Any]:
super().__init__(self , **lowerCAmelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCAmelCase_ : Dict = fsspec.open(
lowerCAmelCase_ , mode="rb" , protocol=lowerCAmelCase_ , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCAmelCase_ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] )
UpperCAmelCase_ : Any = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
UpperCAmelCase_ : Any = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCAmelCase_ ).lstrip("/" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
if self.dir_cache is None:
UpperCAmelCase_ : Optional[Any] = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
UpperCAmelCase_ : str = {f["name"]: f}
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : str ) -> Any:
return self.file.open().read()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str = "rb" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Tuple , ) -> Tuple:
UpperCAmelCase_ : List[str] = self._strip_protocol(lowerCAmelCase_ )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class UpperCamelCase_ (__A ):
__magic_name__ = '''bz2'''
__magic_name__ = '''bz2'''
__magic_name__ = '''.bz2'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''gzip'''
__magic_name__ = '''gzip'''
__magic_name__ = '''.gz'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''lz4'''
__magic_name__ = '''lz4'''
__magic_name__ = '''.lz4'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''xz'''
__magic_name__ = '''xz'''
__magic_name__ = '''.xz'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''zstd'''
__magic_name__ = '''zstd'''
__magic_name__ = '''.zst'''
def __init__( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str = "rb" , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[dict] = None , lowerCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCAmelCase_ : List[Any] , ) -> Dict:
super().__init__(
fo=lowerCAmelCase_ , mode=lowerCAmelCase_ , target_protocol=lowerCAmelCase_ , target_options=lowerCAmelCase_ , block_size=lowerCAmelCase_ , **lowerCAmelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase_ : Optional[Any] = self.file.__enter__
class UpperCamelCase_ :
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = file_
def __enter__( self : Tuple ) -> List[Any]:
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : int ) -> Optional[int]:
self._file.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __iter__( self : Optional[int] ) -> int:
return iter(self._file )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return next(self._file )
def __getattr__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
return getattr(self._file , lowerCAmelCase_ )
def fixed_enter(*lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[Any] ):
return WrappedFile(_enter(*lowerCAmelCase_ , **lowerCAmelCase_ ) )
UpperCAmelCase_ : List[Any] = fixed_enter
| 95 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 96 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
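# Non-interactive usage sketch (assumed example, not part of the original module):
#   data = [5, 2, 4, 1]
#   rec_insertion_sort(data, len(data))
#   print(data)  # [1, 2, 4, 5]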
| 49 | 0 |
import heapq
import sys
import numpy as np
__a = tuple[int, int]
class lowercase__:
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Any:
lowercase_ = []
lowercase_ = set()
def _lowercase ( self : Dict ) -> Tuple:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _lowercase ( self : int ) -> Any:
return len(self.elements ) == 0
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str ) -> str:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE_ )
else:
# update
# print("update", item)
lowercase_ = []
((lowercase_) , (lowercase_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowercase_) , (lowercase_)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE_ )
lowercase_ = []
((lowercase_) , (lowercase_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowercase_) , (lowercase_)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowercase ( self : Any ) -> Optional[int]:
return self.elements[0][1]
def _lowercase ( self : Tuple ) -> str:
((lowercase_) , (lowercase_)) = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE_ )
return (priority, item)
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# euclidean distance
lowercase_ = np.array(snake_case__ )
lowercase_ = np.array(snake_case__ )
return np.linalg.norm(a - b )
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# integer division by time variable
return consistent_heuristic(snake_case__ , snake_case__ ) // t
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def a ( snake_case__: TPos , snake_case__: int , snake_case__: TPos , snake_case__: dict[TPos, float] ):
'''simple docstring'''
lowercase_ = g_function[start] + Wa * heuristics[i](snake_case__ , snake_case__ )
return ans
def a ( snake_case__: List[Any] , snake_case__: Dict , snake_case__: List[str] ):
'''simple docstring'''
lowercase_ = np.chararray((n, n) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
lowercase_ = '''*'''
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (j, (n - 1) - i) in blocks:
lowercase_ = '''#'''
lowercase_ = '''-'''
lowercase_ = back_pointer[goal]
while x != start:
((lowercase_) , (lowercase_)) = x
# print(x)
lowercase_ = '''-'''
lowercase_ = back_pointer[x]
lowercase_ = '''-'''
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
lowercase_ = back_pointer[goal]
while x != start:
print(snake_case__ , end=''' ''' )
lowercase_ = back_pointer[x]
print(snake_case__ )
sys.exit()
def a ( snake_case__: TPos ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def a ( snake_case__: Optional[Any] , snake_case__: List[str] , snake_case__: Dict , snake_case__: str , snake_case__: Dict , snake_case__: Optional[Any] , snake_case__: List[Any] , snake_case__: Optional[int] , ):
'''simple docstring'''
for itera in range(snake_case__ ):
open_list[itera].remove_element(snake_case__ )
# print("s", s)
# print("j", j)
((lowercase_) , (lowercase_)) = s
lowercase_ = (x - 1, y)
lowercase_ = (x + 1, y)
lowercase_ = (x, y + 1)
lowercase_ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(snake_case__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(snake_case__ )
lowercase_ = -1
lowercase_ = float('''inf''' )
if valid(snake_case__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase_ = g_function[s] + 1
lowercase_ = s
if neighbours not in close_list_anchor:
open_list[0].put(snake_case__ , key(snake_case__ , 0 , snake_case__ , snake_case__ ) )
if neighbours not in close_list_inad:
for var in range(1 , snake_case__ ):
if key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) <= Wa * key(
snake_case__ , 0 , snake_case__ , snake_case__ ):
open_list[j].put(
snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
def a ( ):
'''simple docstring'''
lowercase_ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__a = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__a = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
__a = make_common_ground()
__a = blocks_blk
# hyper parameters
__a = 1
__a = 1
__a = 2_0
__a = 3 # one consistent and two other inconsistent
# start and end destination
__a = (0, 0)
__a = (n - 1, n - 1)
__a = 1
def a ( snake_case__: TPos , snake_case__: TPos , snake_case__: int ):
'''simple docstring'''
lowercase_ = {start: 0, goal: float('''inf''' )}
lowercase_ = {start: -1, goal: -1}
lowercase_ = []
lowercase_ = set()
for i in range(snake_case__ ):
open_list.append(PriorityQueue() )
open_list[i].put(snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
lowercase_ = []
lowercase_ = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , snake_case__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowercase_ , lowercase_ = open_list[i].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_inad.append(snake_case__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowercase_ = open_list[0].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , 0 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_anchor.append(snake_case__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 97 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def a__ ( ) -> Tuple:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
 # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def a__ ( ) -> Tuple:
"""simple docstring"""
assert _test_patching.open is open
_UpperCamelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, '''open''', lowercase ):
assert _test_patching.open is mock
 # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching, '''pandas.read_csv''', lowercase ):
pass
def a__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, '''len''', lowercase ) is None
with patch_submodule(_test_patching, '''len''', lowercase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_start_and_stop_mock__'''
_UpperCamelCase = patch_submodule(_test_patching, '''open''', lowercase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def a__ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCamelCase = '''__test_patch_submodule_successive_join__'''
_UpperCamelCase = '''__test_patch_submodule_successive_dirname__'''
_UpperCamelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
with patch_submodule(_test_patching, '''os.rename''', lowercase ):
with patch_submodule(_test_patching, '''os.path.dirname''', lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, '''os.rename''', lowercase ):
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
with patch_submodule(_test_patching, '''os.path.dirname''', lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def a__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', lowercase ):
pass
with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', lowercase ):
pass
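# For orientation (added note, not from the original test file): the assertions
# above only hold if the companion module `_test_patching` binds both the plain
# and the renamed names that get inspected. A plausible reconstruction from the
# assertions alone -- not a copy of the real helper -- would look like:
#
#     import os
#     import os as renamed_os
#     from os import path
#     from os import path as renamed_path
#     from os.path import join
#     from os.path import join as renamed_join
#
# `patch_submodule` then has to rewrite every access path to the same target
# (os.path.join, path.join, join, renamed_os.path.join, ...) while leaving
# unrelated attributes such as os.rename and os.path.dirname untouched.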
| 98 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
 # Create the data structures and fill in the initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
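# Illustrative worked example (added; the decoder name `viterbi` below is an
# assumption, since every function in this file shares the obfuscated name
# `lowercase__`). With a correctly named implementation, the classic
# Healthy/Fever HMM is expected to return ['Healthy', 'Healthy', 'Fever'] for
# these observations.
if __name__ == "__main__":
    example_observations = ["normal", "cold", "dizzy"]
    example_states = ["Healthy", "Fever"]
    example_initial = {"Healthy": 0.6, "Fever": 0.4}
    example_transition = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    example_emission = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # print(viterbi(example_observations, example_states, example_initial,
    #               example_transition, example_emission))  # hypothetical name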
| 49 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , __A , __A ):
__a = jnp.ones((batch_size, length) ) / length
return scores
def snake_case_ ( self ):
__a = None
__a = 20
__a = self._get_uniform_logits(batch_size=2 , length=__A )
# tweak scores to not be uniform anymore
__a = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__a = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__a = jax.nn.softmax(__A , axis=-1 )
__a = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a = FlaxTemperatureLogitsWarper(temperature=1.3 )
__a = jax.nn.softmax(temp_dist_warper_sharper(__A , scores.copy() , cur_len=__A ) , axis=-1 )
__a = jax.nn.softmax(temp_dist_warper_smoother(__A , scores.copy() , cur_len=__A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case_ ( self ):
__a = None
__a = 10
__a = 2
# create ramp distribution
__a = np.broadcast_to(np.arange(__A )[None, :] , (batch_size, vocab_size) ).copy()
__a = ramp_logits[1:, : vocab_size // 2] + vocab_size
__a = FlaxTopKLogitsWarper(3 )
__a = top_k_warp(__A , __A , cur_len=__A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__a = 5
__a = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__a = np.broadcast_to(np.arange(__A )[None, :] , (batch_size, length) ).copy()
__a = top_k_warp_safety_check(__A , __A , cur_len=__A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case_ ( self ):
__a = None
__a = 10
__a = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__a = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__a = FlaxTopPLogitsWarper(0.8 )
__a = np.exp(top_p_warp(__A , __A , cur_len=__A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__a = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__a = np.broadcast_to(np.arange(__A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__a = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__a = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__a = top_p_warp(__A , __A , cur_len=__A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case_ ( self ):
__a = 20
__a = 4
__a = 0
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__A )
# check that min length is applied at length 5
__a = ids_tensor((batch_size, 20) , vocab_size=20 )
__a = 5
__a = self._get_uniform_logits(__A , __A )
__a = min_dist_processor(__A , __A , cur_len=__A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__a = self._get_uniform_logits(__A , __A )
__a = 15
__a = min_dist_processor(__A , __A , cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def snake_case_ ( self ):
__a = 20
__a = 4
__a = 0
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
# check that all scores are -inf except the bos_token_id score
__a = ids_tensor((batch_size, 1) , vocab_size=20 )
__a = 1
__a = self._get_uniform_logits(__A , __A )
__a = logits_processor(__A , __A , cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__a = 3
__a = self._get_uniform_logits(__A , __A )
__a = logits_processor(__A , __A , cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def snake_case_ ( self ):
__a = 20
__a = 4
__a = 0
__a = 5
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__A , eos_token_id=__A )
# check that all scores are -inf except the eos_token_id when max_length is reached
__a = ids_tensor((batch_size, 4) , vocab_size=20 )
__a = 4
__a = self._get_uniform_logits(__A , __A )
__a = logits_processor(__A , __A , cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__a = 3
__a = self._get_uniform_logits(__A , __A )
__a = logits_processor(__A , __A , cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def snake_case_ ( self ):
__a = 4
__a = 10
__a = 15
__a = 2
__a = 1
__a = 15
# dummy input_ids and scores
__a = ids_tensor((batch_size, sequence_length) , __A )
__a = input_ids.copy()
__a = self._get_uniform_logits(__A , __A )
__a = scores.copy()
# instantiate all dist processors
__a = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a = FlaxTopKLogitsWarper(3 )
__a = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__A )
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__A , eos_token_id=__A )
__a = 10
# no processor list
__a = temp_dist_warp(__A , __A , cur_len=__A )
__a = top_k_warp(__A , __A , cur_len=__A )
__a = top_p_warp(__A , __A , cur_len=__A )
__a = min_dist_proc(__A , __A , cur_len=__A )
__a = bos_dist_proc(__A , __A , cur_len=__A )
__a = eos_dist_proc(__A , __A , cur_len=__A )
# with processor list
__a = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__a = processor(__A , __A , cur_len=__A )
# scores should be equal
self.assertTrue(jnp.allclose(__A , __A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case_ ( self ):
__a = 4
__a = 10
__a = 15
__a = 2
__a = 1
__a = 15
# dummy input_ids and scores
__a = ids_tensor((batch_size, sequence_length) , __A )
__a = input_ids.copy()
__a = self._get_uniform_logits(__A , __A )
__a = scores.copy()
# instantiate all dist processors
__a = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a = FlaxTopKLogitsWarper(3 )
__a = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__A )
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__A , eos_token_id=__A )
__a = 10
# no processor list
def run_no_processor_list(__A , __A , __A ):
__a = temp_dist_warp(__A , __A , cur_len=__A )
__a = top_k_warp(__A , __A , cur_len=__A )
__a = top_p_warp(__A , __A , cur_len=__A )
__a = min_dist_proc(__A , __A , cur_len=__A )
__a = bos_dist_proc(__A , __A , cur_len=__A )
__a = eos_dist_proc(__A , __A , cur_len=__A )
return scores
# with processor list
def run_processor_list(__A , __A , __A ):
__a = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__a = processor(__A , __A , cur_len=__A )
return scores
__a = jax.jit(__A )
__a = jax.jit(__A )
__a = jitted_run_no_processor_list(__A , __A , __A )
__a = jitted_run_processor_list(__A , __A , __A )
# scores should be equal
self.assertTrue(jnp.allclose(__A , __A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
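# Minimal sketch (added for illustration, not exercised by the tests above) of
# how such a processor list is used inside a single sampling step: warp the raw
# logits, then draw the next token. The helper name and the PRNG-key handling
# are assumptions; the call convention (input_ids, scores, cur_len=...) matches
# the tests in this class.
def _sample_next_token_example(input_ids, logits, cur_len, rng_key):
    warpers = FlaxLogitsProcessorList(
        [
            FlaxTemperatureLogitsWarper(temperature=0.7),
            FlaxTopKLogitsWarper(50),
            FlaxTopPLogitsWarper(0.95),
        ]
    )
    # apply temperature, top-k and top-p filtering to the raw logits
    warped_logits = warpers(input_ids, logits, cur_len=cur_len)
    # sample one token id per batch row from the warped distribution
    return jax.random.categorical(rng_key, warped_logits, axis=-1)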
| 99 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
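# Illustrative usage (added note, not part of the class above): loading the
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP -- e.g. via AutoTokenizer or
# the fast class defined here -- and encoding a question/passage pair exercises
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences.
# Exact token ids depend on the hosted vocabulary, so none are shown.
#
#     tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
#     encoded = tokenizer("How do planes fly?", "Lift is generated by the wings.")
#     # `encoded` carries "input_ids" and "attention_mask", the model_input_names above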
| 49 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : List[Any] = None
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , '''feature_size''' ) )
self.assertTrue(hasattr(A_ , '''sampling_rate''' ) )
self.assertTrue(hasattr(A_ , '''padding_value''' ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def lowercase_ ( self , A_=False ):
'''simple docstring'''
def _inputs_have_equal_length(A_ ):
SCREAMING_SNAKE_CASE__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(A_ , A_ ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding=A_ )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''max_length''' )[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=A_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=A_ )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
SCREAMING_SNAKE_CASE__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def lowercase_ ( self , A_=False ):
'''simple docstring'''
def _inputs_have_equal_length(A_ ):
SCREAMING_SNAKE_CASE__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(A_ , A_ ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=A_ )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=A_ , )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=A_ )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''longest''' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''longest''' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''max_length''' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
SCREAMING_SNAKE_CASE__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def lowercase_ ( self ):
'''simple docstring'''
self._check_padding(numpify=A_ )
def lowercase_ ( self ):
'''simple docstring'''
self._check_padding(numpify=A_ )
def lowercase_ ( self ):
'''simple docstring'''
self._check_truncation(numpify=A_ )
def lowercase_ ( self ):
'''simple docstring'''
self._check_truncation(numpify=A_ )
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**A_ )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ = [len(A_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**A_ )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ = [len(A_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = min(A_ )
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
A_ , padding='''max_length''' , max_length=A_ , truncation=A_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
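# A concrete, self-contained sketch (added; not used by the mixin above) of the
# pad semantics these tests exercise. The choice of Wav2Vec2FeatureExtractor as
# the example SequenceFeatureExtractor and the helper name are illustrative
# assumptions. With padding="longest", the shorter sequence is filled with
# padding_value and its attention_mask entries are zeroed.
def _pad_semantics_example():
    from transformers import Wav2Vec2FeatureExtractor

    feat_extract = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, return_attention_mask=True
    )
    batch = BatchFeature({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]})
    padded = feat_extract.pad(batch, padding="longest", return_tensors="np")
    # padded["input_values"].shape == (2, 3)
    # padded["attention_mask"].tolist() == [[1, 1, 1], [1, 1, 0]]
    return padded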
| 100 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    _lowercase : Any = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.\n    '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = FlaxAutoencoderKL
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3
SCREAMING_SNAKE_CASE_ : Any = (3_2, 3_2)
SCREAMING_SNAKE_CASE_ : int = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = jax.random.uniform(lowerCAmelCase__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_input
return init_dict, inputs_dict
| 101 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
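# Exact GELU activation: x * Phi(x), with the standard normal CDF computed via erf.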
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
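# Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).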
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
UpperCamelCase : str = tf.cast(math.pi , x.dtype )
UpperCamelCase : List[Any] = tf.cast(0.04_47_15 , x.dtype )
UpperCamelCase : List[str] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE , 3 )) ))
return x * cdf
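# Mish activation: x * tanh(softplus(x)).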
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE ) )
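# Faster tanh-based GELU approximation using the precomputed constant sqrt(2/pi) ~= 0.7978845608.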
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tf.cast(0.04_47_15 , x.dtype )
UpperCamelCase : Dict = tf.cast(0.79_78_84_56_08 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
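# Quick GELU: x * sigmoid(1.702 * x).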
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = tf.cast(1.7_02 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
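# GELU with outputs clipped to the range [-10, 10].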
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE ) , -10 , 10 )
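# Gated Linear Unit: split the input in two along the given axis and gate one half with the sigmoid of the other.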
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ):
UpperCamelCase , UpperCamelCase : List[str] = tf.split(SCREAMING_SNAKE_CASE , 2 , axis=SCREAMING_SNAKE_CASE )
return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE )
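# TensorFlow >= 2.4 ships a native (approximate) GELU; otherwise fall back to the manual implementations above.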
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE , approximate=SCREAMING_SNAKE_CASE )
__magic_name__ : List[str] = tf.keras.activations.gelu
__magic_name__ : int = approximate_gelu_wrap
else:
__magic_name__ : Optional[Any] = _gelu
__magic_name__ : Optional[Any] = _gelu_new
__magic_name__ : Optional[Any] = {
"""gelu""": gelu,
"""gelu_10""": gelu_aa,
"""gelu_fast""": gelu_fast,
"""gelu_new""": gelu_new,
"""glu""": glu,
"""mish""": mish,
"""quick_gelu""": quick_gelu,
"""relu""": tf.keras.activations.relu,
"""sigmoid""": tf.keras.activations.sigmoid,
"""silu""": tf.keras.activations.swish,
"""swish""": tf.keras.activations.swish,
"""tanh""": tf.keras.activations.tanh,
}
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 102 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
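    # Build model inputs with special tokens: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.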
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Dict = WavaVecaPhonemeCTCTokenizer
A__ : List[str] = False
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
_snake_case = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
_snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_snake_case = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + '''\n''' )
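    # Build a short (text, ids) pair that survives an encode/decode round trip, used by the common tokenizer tests.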
def __UpperCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=2_0 , __lowerCamelCase : int=5 ):
"""simple docstring"""
_snake_case = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )) for i in range(len(__lowerCamelCase ) )]
_snake_case = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowerCamelCase ) , __lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
_snake_case = ''' ''' + output_txt
_snake_case = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def __UpperCAmelCase ( self : List[Any] , **__lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
_snake_case = tokenizer('''m xxx ɪ''' , do_phonemize=__lowerCamelCase ).input_ids
self.assertEqual(__lowerCamelCase , [1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
_snake_case = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowerCamelCase ).input_ids
self.assertEqual(__lowerCamelCase , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
_snake_case = tokenizer('''maɪ c''' , do_phonemize=__lowerCamelCase ).input_ids
self.assertEqual(__lowerCamelCase , [3, 2_0_0] ) # mai should be <unk> (=3)
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowerCamelCase ).input_ids , tokenizer(__lowerCamelCase , do_phonemize=__lowerCamelCase ).input_ids )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
_snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_snake_case = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
_snake_case = tokenizer.decode(sample_ids[0] )
_snake_case = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , batch_tokens[0] )
self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowerCamelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowerCamelCase ).input_ids , tokenizer(__lowerCamelCase , do_phonemize=__lowerCamelCase ).input_ids )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
_snake_case = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
_snake_case = tokenizer.decode(sample_ids[0] )
_snake_case = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , batch_tokens[0] )
self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
_snake_case = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCamelCase )
_snake_case = tokenizer.batch_decode(__lowerCamelCase , filter_word_delimiter_token=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , batch_tokens[0] )
self.assertEqual(__lowerCamelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
_snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids , filter_word_delimiter_token=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
_snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids , filter_word_delimiter_token=__lowerCamelCase )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__lowerCamelCase )
_snake_case = '''Hello how are you'''
_snake_case = tokenizer(__lowerCamelCase , phonemizer_lang='''en-us''' ).input_ids
_snake_case = tokenizer(__lowerCamelCase , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
_snake_case = tokenizer.decode(__lowerCamelCase )
_snake_case = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(__lowerCamelCase , '''ɛ l o h aʊ a ʁ j u''' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_snake_case = '''Hello how Are you'''
_snake_case = '''hello how are you'''
_snake_case = tokenizer(__lowerCamelCase ).input_ids
_snake_case = tokenizer(__lowerCamelCase ).input_ids
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
_snake_case = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
_snake_case = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = [d[key] for d in offsets]
return retrieved_list
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
_snake_case = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
_snake_case = tokenizer.decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase , filter_word_delimiter_token=__lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(__lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
self.assertTrue(isinstance(outputs_list[0] , __lowerCamelCase ) )
# transform list to ModelOutput
_snake_case = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
if isinstance(__lowerCamelCase , __lowerCamelCase ):
[recursive_check(__lowerCamelCase , __lowerCamelCase ) for la, la in zip(__lowerCamelCase , __lowerCamelCase )]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
_snake_case = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # that the output type is correct and that the output is identical to `decode`.
# char
_snake_case = tokenizer.batch_decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase )
_snake_case = [tokenizer.decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase ) for ids in sample_ids]
check_list_tuples_equal(__lowerCamelCase , __lowerCamelCase )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_snake_case = tokenizer.vocab_size
_snake_case = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
_snake_case = tokenizer.add_tokens(__lowerCamelCase )
_snake_case = tokenizer.vocab_size
_snake_case = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase , 0 )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , all_size + len(__lowerCamelCase ) )
_snake_case = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowerCamelCase )
self.assertGreaterEqual(len(__lowerCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_snake_case = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
_snake_case = tokenizer.add_special_tokens(__lowerCamelCase )
_snake_case = tokenizer.vocab_size
_snake_case = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase , 0 )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , all_size_a + len(__lowerCamelCase ) )
_snake_case = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowerCamelCase )
self.assertGreaterEqual(len(__lowerCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string, which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
_snake_case = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_snake_case = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
_snake_case = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(output['''text'''] , __lowerCamelCase )
| 103 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(UpperCAmelCase_, UpperCAmelCase_ ):
A__ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCAmelCase_ )
if number < 0:
return False
A__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
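# Sanity-check the distillation arguments: loss weights, MLM/CLM mode and student/teacher type combinations must be consistent.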
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
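# Called when --freeze_pos_embs is set: freeze the student's positional embeddings (RoBERTa and GPT-2 students).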
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
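# Entry point: parse arguments, load the teacher, tokenizer and student, prepare the dataset and run the Distiller.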
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized data file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature applied to the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
__a : Tuple = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__a : Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = TextaTextGenerationPipeline(model=snake_case__ ,tokenizer=snake_case__ )
return generator, ["Something to write", "Something else"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = generator('Something there' )
self.assertEqual(snake_case__ ,[{'generated_text': ANY(snake_case__ )}] )
        # These are encoder-decoder models, so they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
SCREAMING_SNAKE_CASE_ : List[Any] = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=snake_case__ )
self.assertEqual(
snake_case__ ,[
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = generator(
['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=snake_case__ )
self.assertEqual(
snake_case__ ,[
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] ,)
with self.assertRaises(snake_case__ ):
generator(4 )
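    # Torch-specific checks on a tiny random T5 model: greedy decoding, beam search and tensor outputs.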
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_ : str = generator('Something there' ,do_sample=snake_case__ )
self.assertEqual(snake_case__ ,[{'generated_text': ''}] )
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : Dict = generator(
'Something there' ,num_return_sequences=snake_case__ ,num_beams=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = generator('This is a test' ,do_sample=snake_case__ ,num_return_sequences=2 ,return_tensors=snake_case__ )
self.assertEqual(
snake_case__ ,[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] ,)
SCREAMING_SNAKE_CASE_ : Any = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : str = '<pad>'
SCREAMING_SNAKE_CASE_ : str = generator(
['This is a test', 'This is a second test'] ,do_sample=snake_case__ ,num_return_sequences=2 ,batch_size=2 ,return_tensors=snake_case__ ,)
self.assertEqual(
snake_case__ ,[
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] ,)
@require_tf
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_ : List[Any] = generator('Something there' ,do_sample=snake_case__ )
self.assertEqual(snake_case__ ,[{'generated_text': ''}] )
| 105 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 106 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
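# Convert an original BertAbs checkpoint to the transformers-style implementation and verify both produce identical outputs.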
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
'''simple docstring'''
import sys
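# Project Euler problem 8: the 1000-digit number in which we look for the greatest product of thirteen adjacent digits.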
_UpperCAmelCase : Optional[int] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
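# Slide a 13-digit window across the number and keep the largest product of its digits.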
def _SCREAMING_SNAKE_CASE ( __snake_case : str = N ):
_A = -sys.maxsize - 1
for i in range(len(__snake_case ) - 1_2 ):
_A = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
_A = product
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: Union[str, Any] = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ):
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize ( x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
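# --- Editor's illustrative sketch (not part of the original module) ---
# How the two helpers above compose: pixels are flattened to (num_pixels, 3) and each
# pixel is assigned the index of its nearest cluster centre. The 2-colour palette and
# the tiny 1x2 "image" below are made-up values used only for this demo.
def _color_quantize_demo() -> np.ndarray:
    palette = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])  # hypothetical black/white clusters
    pixels = np.array([[[0.1, 0.0, 0.2], [0.9, 1.0, 0.8]]])  # one 1x2 RGB image
    return color_quantize(pixels, palette)  # expected: array([0, 1])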
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
'''simple docstring'''
_lowerCamelCase = ['''pixel_values''']
def __init__( self : Dict , lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase : bool = True , lowerCamelCase : bool = True , **lowerCamelCase : Dict , ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = size if size is not None else {"""height""": 256, """width""": 256}
_UpperCAmelCase = get_size_dict(lowerCamelCase )
_UpperCAmelCase = np.array(lowerCamelCase ) if clusters is not None else None
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_color_quantize
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Any , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
lowerCamelCase , size=(size["""height"""], size["""width"""]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = rescale(image=lowerCamelCase , scale=1 / 127.5 , data_format=lowerCamelCase )
_UpperCAmelCase = image - 1
return image
def lowerCamelCase ( self : Any , lowerCamelCase : ImageInput , lowerCamelCase : bool = None , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = None , lowerCamelCase : bool = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCamelCase : List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(lowerCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_UpperCAmelCase = clusters if clusters is not None else self.clusters
_UpperCAmelCase = np.array(lowerCamelCase )
_UpperCAmelCase = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=lowerCamelCase ) for image in images]
if do_color_quantize:
_UpperCAmelCase = [to_channel_dimension_format(lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_UpperCAmelCase = np.array(lowerCamelCase )
_UpperCAmelCase = color_quantize(lowerCamelCase , lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_UpperCAmelCase = images.shape[0]
_UpperCAmelCase = images.reshape(lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_UpperCAmelCase = list(lowerCamelCase )
else:
_UpperCAmelCase = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_UpperCAmelCase = {"""input_ids""": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 108 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
    return [json.loads(line ) for line in snake_case_]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a = ["gpt2"]
a = "gpt2"
if is_tf_available():
class __a ( tf.Module ):
def __init__( self : str ,lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = TFGPTaLMHeadModel.from_config(lowerCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name="""text""" ),) )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenized["""input_ids"""].to_tensor()
__SCREAMING_SNAKE_CASE = tf.cast(input_ids_dense > 0 ,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__SCREAMING_SNAKE_CASE = self.model(input_ids=lowerCamelCase ,attention_mask=lowerCamelCase )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [GPTaTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__SCREAMING_SNAKE_CASE = [TFGPTaTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__SCREAMING_SNAKE_CASE = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
__SCREAMING_SNAKE_CASE = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__SCREAMING_SNAKE_CASE = tokenizer([test_inputs] ,return_tensors="""tf""" )
__SCREAMING_SNAKE_CASE = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__SCREAMING_SNAKE_CASE = python_outputs[key].numpy()
__SCREAMING_SNAKE_CASE = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCamelCase ,tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__SCREAMING_SNAKE_CASE = tf.function(lowerCamelCase )
for test_inputs in self.test_sentences:
__SCREAMING_SNAKE_CASE = tf.constant(lowerCamelCase )
__SCREAMING_SNAKE_CASE = compiled_tokenizer(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf_tokenizer(lowerCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__SCREAMING_SNAKE_CASE = ModelToSave(tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
__SCREAMING_SNAKE_CASE = model.serving(lowerCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """saved.model"""
tf.saved_model.save(lowerCamelCase ,lowerCamelCase ,signatures={"""serving_default""": model.serving} )
__SCREAMING_SNAKE_CASE = tf.saved_model.load(lowerCamelCase )
__SCREAMING_SNAKE_CASE = loaded_model.signatures["""serving_default"""](lowerCamelCase )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
__SCREAMING_SNAKE_CASE = tf_tokenizer(lowerCamelCase ) # Build model with some sample inputs
__SCREAMING_SNAKE_CASE = tf_tokenizer.get_config()
__SCREAMING_SNAKE_CASE = TFGPTaTokenizer.from_config(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_from_config(lowerCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__SCREAMING_SNAKE_CASE = 12_3123
for max_length in [3, 5, 1024]:
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
__SCREAMING_SNAKE_CASE = tf_tokenizer(lowerCamelCase ,max_length=lowerCamelCase )
__SCREAMING_SNAKE_CASE = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 109 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
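# --- Editor's illustrative sketch (not part of the original test file) ---
# The pattern exercised above: run generate() on a background thread and consume the
# streamer as an iterator on the main thread. The `model` and `tokenizer` arguments are
# assumed to be an already-loaded causal LM and its matching tokenizer.
def _streaming_generation_demo(model, tokenizer, prompt: str = "Hello") -> str:
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {**inputs, "max_new_tokens": 10, "streamer": streamer}
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    # Iteration ends once generation finishes; chunks arrive as decoded text.
    return "".join(new_text for new_text in streamer)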
| 49 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowerCamelCase ( _snake_case = 1500000 ):
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= _snake_case:
        for euclid_n in range((euclid_m % 2) + 1 ,euclid_m ,2 ):
            if gcd(euclid_m ,euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter ,_snake_case + 1 ,primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
    print(f'{lowerCamelCase() = }')
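# --- Editor's illustrative sketch (not part of the original solution) ---
# Euclid's parametrisation with m > n >= 1 yields the primitive triple
# (m^2 - n^2, 2*m*n, m^2 + n^2), whose perimeter is 2*m*(m + n) -- the quantity the
# loop above steps over. For m = 2, n = 1 this gives (3, 4, 5) with perimeter 12.
def _euclid_triple(m: int, n: int) -> tuple:
    return (m * m - n * n, 2 * m * n, m * m + n * n)  # _euclid_triple(2, 1) == (3, 4, 5)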
| 110 |
"""simple docstring"""
def lowercase__ ( density :float , bulk_modulus :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
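# --- Editor's illustrative sketch (not part of the original module) ---
# Newton-Laplace: c = sqrt(bulk_modulus / density). Using rough values for water
# (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa) gives roughly 1.47 km/s.
def _speed_in_water_demo() -> float:
    return lowercase__(998, 2.15e9)  # ~1467.8 m/s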
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    a__['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    a__['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], a__, module_spec=__spec__)
| 654 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(snake_case_ , node , visited , rec_stk )
        for node in snake_case_ )
def depth_first_search( snake_case_ :dict , vertex :int , visited :set , rec_stk :set ):
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in snake_case_[vertex]:
        if node not in visited:
            if depth_first_search(snake_case_ , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
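# --- Editor's illustrative sketch (not part of the original module) ---
# The first graph has the back edge 2 -> 0 and therefore a cycle; the second does not.
def _cycle_check_demo() -> tuple:
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1], 1: [2], 2: []}
    return lowercase__(cyclic), lowercase__(acyclic)  # expected: (True, False)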
| 49 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCamelCase : Tuple = 250004
__UpperCamelCase : Optional[Any] = 250020
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( TokenizerTesterMixin,unittest.TestCase ):
'''simple docstring'''
a_ : List[str] = MBartaaTokenizer
a_ : List[str] = MBartaaTokenizerFast
a_ : Union[str, Any] = True
a_ : Optional[Any] = True
def _snake_case ( self : List[str] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : Optional[Any] = MBartaaTokenizer(_lowercase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = """<s>"""
__lowerCamelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_lowercase ) , 1_0_5_4 )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : Tuple = MBartaaTokenizer(_lowercase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_lowercase )
__lowerCamelCase : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__lowerCamelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : Tuple = {"""input_ids""": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def _snake_case ( self : Any ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase : Dict = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__lowerCamelCase : Optional[int] = tempfile.mkdtemp()
__lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(_lowercase )
__lowerCamelCase : Tuple = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowerCamelCase : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__lowerCamelCase : int = tokenizer_r.from_pretrained(_lowercase )
__lowerCamelCase : Optional[Any] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : Dict = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(_lowercase )
__lowerCamelCase : List[str] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCamelCase : List[Any] = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase : int = tokenizer_r.from_pretrained(_lowercase )
__lowerCamelCase : int = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : Tuple = "facebook/mbart-large-50-one-to-many-mmt"
a_ : Optional[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
a_ : List[str] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
a_ : Dict = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def _snake_case ( cls : Tuple ):
'''simple docstring'''
__lowerCamelCase : Dict = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__lowerCamelCase : Optional[Any] = 1
return cls
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 2_5_0_0_3_8 )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
__lowerCamelCase : Optional[Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
__lowerCamelCase : Union[str, Any] = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__lowerCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _lowercase )
__lowerCamelCase : Optional[int] = 1_0
__lowerCamelCase : List[Any] = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_lowercase ) , _lowercase )
def _snake_case ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
__lowerCamelCase : List[str] = MBartaaTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors="""pt""" )
__lowerCamelCase : Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__lowerCamelCase : Dict = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
__lowerCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="""pt""" )
__lowerCamelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=1_0 , return_tensors="""pt""" )
__lowerCamelCase : Tuple = targets["""input_ids"""]
__lowerCamelCase : Any = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 519 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowercase['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _lowercase['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowercase['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _lowercase)
| 49 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase = 600851475143 ):
    '''simple docstring'''
    try:
        n = int(_lowerCamelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
    print(F'''{lowerCamelCase__() = }''')
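# --- Editor's illustrative check (not part of the original solution) ---
# 13195 = 5 * 7 * 13 * 29, so the largest prime factor of Project Euler's small example is 29.
def _largest_prime_factor_demo() -> int:
    return lowerCamelCase__(13195)  # expected: 29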
| 259 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):  # noqa: E741
    n = len(snake_case_ )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in snake_case_[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
lowercase__(_lowercase)
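# Editor's note: with the adjacency list above, the articulation points printed are
# 2, 3 and 5 -- removing any one of them disconnects the graph.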
| 49 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BeitImageProcessor ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*UpperCamelCase : int ,**UpperCamelCase : Tuple ) -> Dict:
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_lowercase ,)
super().__init__(*_lowercase ,**_lowercase )
| 125 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( ProcessorMixin ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 0 |
def snake_case ( snake_case__ :list) -> list:
    _A = len(snake_case__)
    for _ in range(_A):
        for i in range(_ % 2 , _A - 1 , 2):
            if snake_case__[i + 1] < snake_case__[i]:
                snake_case__[i], snake_case__[i + 1] = snake_case__[i + 1], snake_case__[i]
    return snake_case__
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = list(range(10, 0, -1))
    print(F'''Original: {_SCREAMING_SNAKE_CASE}. Sorted: {snake_case(_SCREAMING_SNAKE_CASE)}''')
| 401 |
"""simple docstring"""
def lowercase__ ( a :str , b :str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
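# --- Illustrative sketch (added by the editor, not part of the original sample) ---
# De-obfuscated version of the DP above, using descriptive names of my own
# (`abbreviation`, `dp`, `n`, `m`): dp[i][j] records whether a[:i] can be turned
# into b[:j] by upper-casing some lowercase letters of `a` and deleting the rest.
def abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # upper-case a[i] (or keep it, if already uppercase) to match b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # a lowercase a[i] may simply be deleted
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


print(abbreviation("daBcd", "ABC"))  # True
print(abbreviation("AbcDE", "AFDE"))  # False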
| 49 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__A : List[str] = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__A : List[str] = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Tuple="<unk>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(_lowercase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self : str ):
return len(self.encoder )
def _snake_case ( self : List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : str , __lowerCamelCase : Any ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(_lowercase )
SCREAMING_SNAKE_CASE = get_pairs(_lowercase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(_lowercase , key=lambda __lowerCamelCase : self.bpe_ranks.get(_lowercase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(_lowercase ):
try:
SCREAMING_SNAKE_CASE = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(_lowercase )
SCREAMING_SNAKE_CASE = new_word
if len(_lowercase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(_lowercase )
SCREAMING_SNAKE_CASE = " ".join(_lowercase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , _lowercase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : int , __lowerCamelCase : Union[str, Any] ):
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Any , __lowerCamelCase : List[Any] ):
return self.decoder.get(_lowercase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = "".join(_lowercase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(_lowercase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=False , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
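# --- Illustrative sketch (added by the editor, not part of the original sample) ---
# The BPE merge loop above repeatedly rebuilds the set of adjacent symbol pairs.
# A standalone toy version of that bigram extraction (same logic as the
# get_pairs helper), on a made-up symbol tuple:
word = ("l", "o", "w", "e", "r")
pairs = set()
prev_char = word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
print(sorted(pairs))  # [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]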
| 16 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = KandinskyImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
SCREAMING_SNAKE_CASE__ : Tuple = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
@property
def __magic_name__( self :Tuple ) -> Optional[int]:
return 32
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return 32
@property
def __magic_name__( self :Optional[Any] ) -> Tuple:
return self.time_input_dim
@property
def __magic_name__( self :Any ) -> Optional[int]:
return self.time_input_dim * 4
@property
def __magic_name__( self :Union[str, Any] ) -> Any:
return 100
@property
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__SCREAMING_SNAKE_CASE : Tuple = MultilingualCLIP(_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = text_encoder.eval()
return text_encoder
@property
def __magic_name__( self :Any ) -> List[str]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(**_lowercase )
return model
@property
def __magic_name__( self :List[str] ) -> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__( self :Tuple ) -> int:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : str = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : List[str] = self.dummy_unet
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
__SCREAMING_SNAKE_CASE : List[Any] = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowercase )
__SCREAMING_SNAKE_CASE : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=0 ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase )
__SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase )
# create init_image
__SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowercase ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(_lowercase )
else:
__SCREAMING_SNAKE_CASE : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class(**_lowercase )
__SCREAMING_SNAKE_CASE : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_lowercase ) )
__SCREAMING_SNAKE_CASE : Any = output.images
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
__SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
__SCREAMING_SNAKE_CASE : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__SCREAMING_SNAKE_CASE : Any = '''A red cartoon frog, 4k'''
__SCREAMING_SNAKE_CASE : int = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
__SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__SCREAMING_SNAKE_CASE : str = pipeline(
_lowercase , image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 696 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowercase = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
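# --- Illustrative sketch (added by the editor, not part of the original sample) ---
# Non-interactive, de-obfuscated version of the recursive insertion sort above;
# the names `rec_insertion_sort` / `insert_next` follow the calls made in the
# `__main__` block of the original.
def rec_insertion_sort(collection: list, n: int) -> None:
    # a collection (or prefix) of size <= 1 is already sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # stop once the element is in order with its left neighbour
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)


nums = [5, 2, 9, 1, 5]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 5, 5, 9]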
| 49 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : List[str]=13 , UpperCamelCase : List[Any]=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=True , UpperCamelCase : Dict=99 , UpperCamelCase : Any=32 , UpperCamelCase : Dict=2 , UpperCamelCase : int=4 , UpperCamelCase : str=37 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Tuple=5_12 , UpperCamelCase : str=16 , UpperCamelCase : List[str]=2 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Tuple=4 , UpperCamelCase : Dict=None , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Tuple = 13
_snake_case : Optional[Any] = 7
_snake_case : Union[str, Any] = True
_snake_case : List[str] = True
_snake_case : str = True
_snake_case : Any = True
_snake_case : Dict = 99
_snake_case : Any = 3_84
_snake_case : Dict = 2
_snake_case : Tuple = 4
_snake_case : str = 37
_snake_case : List[str] = 'gelu'
_snake_case : Union[str, Any] = 0.1
_snake_case : int = 0.1
_snake_case : str = 5_12
_snake_case : Optional[Any] = 16
_snake_case : int = 2
_snake_case : Union[str, Any] = 0.02
_snake_case : Tuple = 3
_snake_case : List[Any] = 4
_snake_case : Optional[Any] = 1_28
_snake_case : int = 2
_snake_case : Union[str, Any] = 9
_snake_case : Tuple = 1
_snake_case : Optional[Any] = None
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Union[str, Any] = None
if self.use_input_mask:
_snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[Any] = None
if self.use_token_type_ids:
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Tuple = None
_snake_case : str = None
_snake_case : Union[str, Any] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Dict = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : List[Any] = TFConvBertModel(config=_lowercase )
_snake_case : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_snake_case : List[str] = [input_ids, input_mask]
_snake_case : Any = model(_lowercase )
_snake_case : int = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : str , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = TFConvBertForMaskedLM(config=_lowercase )
_snake_case : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : Dict = self.num_labels
_snake_case : Union[str, Any] = TFConvBertForSequenceClassification(config=_lowercase )
_snake_case : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : List[str] = self.num_choices
_snake_case : str = TFConvBertForMultipleChoice(config=_lowercase )
_snake_case : str = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_snake_case : Union[str, Any] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[str] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_snake_case : Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_snake_case : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Any = self.num_labels
_snake_case : int = TFConvBertForTokenClassification(config=_lowercase )
_snake_case : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case : List[str] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Tuple = TFConvBertForQuestionAnswering(config=_lowercase )
_snake_case : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case : List[str] = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : int = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = config_and_inputs
_snake_case : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ : List[str] =(
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : int =False
a_ : List[str] =False
a_ : Dict =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = TFConvBertModelTester(self )
_snake_case : str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[int] = True
_snake_case : str = True
if hasattr(_lowercase , 'use_cache' ):
_snake_case : Any = True
_snake_case : Union[str, Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_snake_case : List[Any] = getattr(self.model_tester , 'key_length' , _lowercase )
for model_class in self.all_model_classes:
_snake_case : List[Any] = self._prepare_for_class(_lowercase , _lowercase )
_snake_case : Optional[Any] = model_class(_lowercase )
_snake_case : Union[str, Any] = len(model(_lowercase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase , saved_model=_lowercase )
_snake_case : Any = os.path.join(_lowercase , 'saved_model' , '1' )
_snake_case : Optional[int] = tf.keras.models.load_model(_lowercase )
_snake_case : List[Any] = model(_lowercase )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = outputs['encoder_hidden_states']
_snake_case : List[Any] = outputs['encoder_attentions']
else:
_snake_case : str = outputs['hidden_states']
_snake_case : int = outputs['attentions']
self.assertEqual(len(_lowercase ) , _lowercase )
_snake_case : Dict = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
_snake_case : Any = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
_snake_case : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_snake_case : str = getattr(self.model_tester , 'key_length' , _lowercase )
_snake_case : Optional[int] = getattr(self.model_tester , 'key_length' , _lowercase )
def check_decoder_attentions_output(UpperCamelCase : Any ):
_snake_case : Any = len(_lowercase )
self.assertEqual(out_len % 2 , 0 )
_snake_case : List[Any] = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase : Tuple ):
_snake_case : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_snake_case : Dict = True
_snake_case : str = False
_snake_case : Tuple = model_class(_lowercase )
_snake_case : Dict = model(self._prepare_for_class(_lowercase , _lowercase ) )
_snake_case : Any = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
_snake_case : int = model_class(_lowercase )
_snake_case : Union[str, Any] = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : Optional[Any] = True
_snake_case : Dict = model_class(_lowercase )
_snake_case : Tuple = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
_snake_case : Dict = True
_snake_case : int = True
_snake_case : str = model_class(_lowercase )
_snake_case : Dict = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : int = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
_snake_case : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
_snake_case : int = model(_lowercase )[0]
_snake_case : List[str] = [1, 6, 7_68]
self.assertEqual(output.shape , _lowercase )
_snake_case : Union[str, Any] = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1e-4 )
| 411 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 691 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
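# --- Illustrative sketch (added by the editor, not part of the original sample) ---
# A standalone toy run of the same Viterbi dynamic programme (classic
# healthy/fever HMM; all names and probabilities are illustrative only, and this
# does not call into the obfuscated functions above):
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

# probs[(state, t)] = best probability of any state path ending in `state` at step t
probs, pointers = {}, {}
for s in states:
    probs[(s, 0)] = initial[s] * emission[s][observations[0]]
    pointers[(s, 0)] = None
for t in range(1, len(observations)):
    for s in states:
        best_prev = max(states, key=lambda k: probs[(k, t - 1)] * transition[k][s])
        probs[(s, t)] = probs[(best_prev, t - 1)] * transition[best_prev][s] * emission[s][observations[t]]
        pointers[(s, t)] = best_prev

# backtrack from the most probable final state
path = [max(states, key=lambda s: probs[(s, len(observations) - 1)])]
for t in range(len(observations) - 1, 0, -1):
    path.append(pointers[(path[-1], t)])
path.reverse()
print(path)  # ['Healthy', 'Healthy', 'Fever']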
| 49 | 0 |