| code (string, lengths 87–55.2k) | code_codestyle (int64, 0–349) | style_context (string, lengths 135–49.1k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares needed to represent `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    # answers[i] holds the minimum count of perfect squares summing to i
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
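A quick sanity check of the dynamic programme above (function name as restored here; the expected values follow from the recurrence, e.g. 12 = 4 + 4 + 4):

```python
# Minimum number of perfect squares that sum to n
print(minimum_squares_to_represent_a_number(12))  # 3  (4 + 4 + 4)
print(minimum_squares_to_represent_a_number(13))  # 2  (4 + 9)
print(minimum_squares_to_represent_a_number(0))   # 1  (the convention used by this function)
```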
| code_codestyle: 41 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
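For context, a minimal inference sketch mirroring the integration tests above; the checkpoint name comes from the tests, while the local image path is an illustrative assumption:

```python
# Minimal MobileViTV2 classification sketch (image path is hypothetical)
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("cats.jpg")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```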
| style_context_codestyle: 21 | label: 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwritten to save the Q-Former tokenizer in a separate subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwritten to load the Q-Former tokenizer from the separate subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
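A minimal usage sketch of the processor above; the checkpoint name is an assumption (any InstructBLIP checkpoint that ships a `qformer_tokenizer` subfolder should behave the same way):

```python
# Hypothetical usage sketch for the processor defined above
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")  # assumed checkpoint
image = Image.open("example.jpg")  # hypothetical local image
inputs = processor(images=image, text="What is shown in this image?", return_tensors="pt")
# inputs now carries pixel_values, input_ids/attention_mask for the language model,
# and qformer_input_ids/qformer_attention_mask for the Q-Former
```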
| code_codestyle: 42 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))


disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
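Stripped of the Streamlit UI, the app's retrieve-then-generate core reduces to two calls; a sketch, assuming the models and indexes loaded above:

```python
# Core flow: retrieve supporting passages, then condition the seq2seq model on them
question = "How do people make chocolate?"
question_doc, support_list = make_support(question, source="wiki40b", method="dense", n_results=10)
answer, _ = answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)
print(answer)
```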
| style_context_codestyle: 21 | label: 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use the same scale factor for the query, key and value projections of each attention block."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        msg = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(msg) <= line_width:
            logger.info(msg)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of the named quantizer submodule of mod to value v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
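The helpers above compose into the usual post-training calibration loop; a minimal sketch under assumed names (`build_model` and `calibration_batches` are illustrative, not from this file):

```python
# Hypothetical calibration workflow built from the helpers above
import argparse

parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])

set_default_quantizers(args)              # must run before the model is constructed
model = build_model()                     # illustrative model constructor
configure_model(model, args, calib=True)

enable_calibration(model)
for batch in calibration_batches:         # illustrative calibration data
    model(**batch)                        # collect activation statistics
finish_calibration(model, args)           # load amax values, re-enable quantization
```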
| code_codestyle: 43 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_lowercase : Dict = sorted(lowerCamelCase, key=lambda lowerCamelCase: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
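# De-obfuscated sketch of the greedy non-overlapping span selection implemented by
# the method above (names are illustrative, reconstructed from the references in the
# body; a standalone restatement, not the class's actual API):
def _best_spans_sketch(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen_span_intervals = []
    for (start_index, end_index), _score in scores:
        # keep a span only if it neither contains nor is contained in an already-chosen span
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for prev_start, prev_end in chosen_span_intervals
        ):
            continue
        chosen_span_intervals.append((start_index, end_index))
        if len(chosen_span_intervals) == top_spans:
            break
    return chosen_span_intervals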
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 0 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = BertJapaneseTokenizer
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , a__ ):
_lowerCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。"""
_lowerCAmelCase : Tuple = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def __A ( self , a__ ):
_lowerCAmelCase , _lowerCAmelCase : int = self.get_input_output_texts(a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
return text, ids
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : str = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : List[Any] = pickle.load(a__ )
_lowerCAmelCase : List[Any] = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : str = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : Optional[int] = MecabTokenizer(do_lower_case=a__ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : Any = MecabTokenizer(
do_lower_case=a__ , normalize_text=a__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the constructor call above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : List[str] = MecabTokenizer(normalize_text=a__ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : Union[str, Any] = pickle.load(a__ )
_lowerCAmelCase : Tuple = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Tuple = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : str = SudachiTokenizer(do_lower_case=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(normalize_text=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : int = SudachiTokenizer(trim_whitespace=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : Any = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : List[Any] = pickle.load(a__ )
_lowerCAmelCase : str = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer(normalize_text=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = JumanppTokenizer(trim_whitespace=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_lowerCAmelCase : Union[str, Any] = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : List[str] = i
_lowerCAmelCase : Optional[int] = WordpieceTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def __A ( self ):
_lowerCAmelCase : Any = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.subword_tokenizer
_lowerCAmelCase : Any = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(a__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_lowerCAmelCase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(a__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a__ )
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = BertJapaneseTokenizer
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , **a__ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = """こんにちは、世界。 \nこんばんは、世界。"""
_lowerCAmelCase : Optional[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
a__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_lowerCAmelCase : List[Any] = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : int = i
_lowerCAmelCase : str = CharacterTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_lowerCAmelCase : List[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = """cl-tohoku/bert-base-japanese"""
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_lowerCAmelCase : Dict = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 44 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not numbers:
return 0
if not isinstance(lowerCamelCase_ , (list, tuple) ) or not all(
isinstance(lowerCamelCase_ , lowerCamelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowercase : int = numbers[0]
for i in range(1 , len(lowerCamelCase_ ) ):
# update the maximum and minimum subarray products
_lowercase : Union[str, Any] = numbers[i]
if number < 0:
_lowercase , _lowercase : Any = min_till_now, max_till_now
_lowercase : Union[str, Any] = max(lowerCamelCase_ , max_till_now * number )
_lowercase : Union[str, Any] = min(lowerCamelCase_ , min_till_now * number )
# update the maximum product found till now
_lowercase : Optional[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
return max_prod
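# De-obfuscated sketch of the routine above: track both the maximum and the minimum
# product ending at each position, swapping the two whenever a negative number would
# flip their roles (names reconstructed from the references in the body above):
def _max_product_sketch(numbers):
    if not numbers:
        return 0
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod


assert _max_product_sketch([2, 3, -2, 4]) == 6
assert _max_product_sketch([-2, 0, -1]) == 0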
| 21 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__a = set()
# Replace all the whitespace in our sentence
__a = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
    return len(frequency) == 26
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    __a = [False] * 26
    for char in input_str:
        if char.islower():
            __a[ord(char) - ord("a")] = True
        elif char.isupper():
            __a[ord(char) - ord("A")] = True
    return all(__a)
def lowercase ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
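# Quick sanity check for the three variants above (hedged: the function names are
# taken from the benchmark's setup string below, since the defs themselves are
# obfuscated in this listing):
# assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
# assert not is_pangram_fastest("hello world")  # only 7 distinct letters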
def lowercase ( ) -> None:
from timeit import timeit
__a = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=lowerCAmelCase__ ) )
print(timeit('''is_pangram_faster()''' , setup=lowerCAmelCase__ ) )
print(timeit('''is_pangram_fastest()''' , setup=lowerCAmelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 45 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
SCREAMING_SNAKE_CASE : Tuple = (3, 9, -11, 0, 7, 5, 1, -1)
SCREAMING_SNAKE_CASE : Union[str, Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowerCamelCase:
lowercase_ : int
lowercase_ : Node | None
class _lowerCamelCase:
def __init__( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Node | None = None
for i in sorted(lowerCamelCase, reverse=lowerCamelCase):
_lowercase : Tuple = Node(lowerCamelCase, self.head)
def __iter__( self) -> Iterator[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.head
while node:
yield node.data
_lowercase : int = node.next_node
def __len__( self) -> int:
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self) -> str:
"""simple docstring"""
return " -> ".join([str(lowerCamelCase) for node in self])
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> SortedLinkedList:
return SortedLinkedList(list(lowerCamelCase_ ) + list(lowerCamelCase_ ) )
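# Hedged example of the merge above: the constructor sorts on insertion, so the
# __main__ block below should print the two test tuples interleaved in ascending order:
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10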
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : int = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 21 | 0 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowercase ( _UpperCAmelCase ):
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
super().__init__(*lowercase , **lowercase )
lowerCAmelCase = {}
def _snake_case ( self , lowercase , *lowercase , **lowercase ) -> int:
lowerCAmelCase = super().add_tokens(lowercase , *lowercase , **lowercase )
if num_added_tokens == 0:
raise ValueError(
f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def _snake_case ( self , lowercase , *lowercase , lowercase=1 , **lowercase ) -> str:
lowerCAmelCase = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
else:
lowerCAmelCase = []
for i in range(lowercase ):
lowerCAmelCase = placeholder_token + f'_{i}'
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'The tokenizer already has placeholder token {token} that can get confused with'
                    f' {placeholder_token}; keep placeholder tokens independent.' )
lowerCAmelCase = output
def _snake_case ( self , lowercase , lowercase=False , lowercase=1.0 ) -> List[str]:
if isinstance(lowercase , lowercase ):
lowerCAmelCase = []
for i in range(len(lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCAmelCase = self.token_map[placeholder_token]
lowerCAmelCase = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCAmelCase = copy.copy(lowercase )
random.shuffle(lowercase )
lowerCAmelCase = text.replace(lowercase , """ """.join(lowercase ) )
return text
def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> Tuple:
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def _snake_case ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> Any:
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
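# Hedged usage sketch for the multi-vector tokenizer above. All names below are
# assumptions: the class and most method names are obfuscated in this listing, and
# only `try_adding_tokens` and `replace_placeholder_tokens_in_text` appear verbatim
# in the body. The checkpoint id is illustrative.
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)  # registers <cat-toy>_0 .. <cat-toy>_3
# tokenizer("a photo of <cat-toy>", vector_shuffle=True)  # __call__ expands the placeholder first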
| 46 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Any = KandinskyImgaImgPipeline
lowercase_ : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
lowercase_ : Any = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
lowercase_ : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ : Union[str, Any] = False
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=10_05, )
_lowercase : Optional[int] = MultilingualCLIP(lowerCamelCase)
_lowercase : List[str] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Union[str, Any] = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Optional[Any] = UNetaDConditionModel(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.dummy_text_encoder
_lowercase : List[Any] = self.dummy_tokenizer
_lowercase : int = self.dummy_unet
_lowercase : int = self.dummy_movq
_lowercase : Optional[int] = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : List[Any] = DDIMScheduler(**lowerCamelCase)
_lowercase : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict:
"""simple docstring"""
_lowercase : List[str] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(lowerCamelCase)
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[int] = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Tuple = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB').resize((2_56, 2_56))
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[str] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = 'cpu'
_lowercase : Tuple = self.get_dummy_components()
_lowercase : str = self.pipeline_class(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : Optional[int] = output.images
_lowercase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase), return_dict=lowerCamelCase, )[0]
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : Tuple = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy')
_lowercase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_lowercase : Optional[int] = 'A red cartoon frog, 4k'
_lowercase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase)
_lowercase : Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1', torch_dtype=torch.floataa)
_lowercase : List[Any] = pipeline.to(lowerCamelCase)
pipeline.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = torch.Generator(device='cpu').manual_seed(0)
_lowercase , _lowercase : List[Any] = pipe_prior(
lowerCamelCase, generator=lowerCamelCase, num_inference_steps=5, negative_prompt='', ).to_tuple()
_lowercase : Union[str, Any] = pipeline(
lowerCamelCase, image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='np', )
_lowercase : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
| 21 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase : List[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
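# The module above uses transformers' lazy-import pattern: heavy framework-specific
# submodules are imported only when one of their attributes is first accessed. A
# minimal runnable sketch of the idea (simplified; the real _LazyModule also handles
# __dir__, attribute caching, and module specs):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)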
| 47 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
@add_end_docstrings(_a )
class _lowerCamelCase( _a ):
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
super().__init__(*lowerCamelCase, **lowerCamelCase)
requires_backends(self, 'vision')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def UpperCamelCase ( self, lowerCamelCase=None) -> int:
"""simple docstring"""
_lowercase : Dict = {}
if top_k is not None:
_lowercase : List[str] = top_k
return {}, {}, postprocess_params
def __call__( self, lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
return super().__call__(lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = load_image(lowerCamelCase)
_lowercase : List[str] = self.image_processor(images=lowerCamelCase, return_tensors=self.framework)
return model_inputs
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.model(**lowerCamelCase)
return model_outputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=5) -> Dict:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowercase : List[Any] = self.model.config.num_labels
if self.framework == "pt":
_lowercase : int = model_outputs.logits.softmax(-1)[0]
_lowercase , _lowercase : Union[str, Any] = probs.topk(lowerCamelCase)
elif self.framework == "tf":
_lowercase : int = stable_softmax(model_outputs.logits, axis=-1)[0]
_lowercase : List[Any] = tf.math.top_k(lowerCamelCase, k=lowerCamelCase)
_lowercase , _lowercase : Any = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''')
_lowercase : str = scores.tolist()
_lowercase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase, lowerCamelCase)]
| 21 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 48 |
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> float:
_lowercase : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
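# Worked example of the closed form above, S = (n / 2) * (2a + (n - 1) * d):
# with first_term a = 1, common_diff d = 1, num_of_terms n = 10,
# S = (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0, matching the demo call below.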
def UpperCamelCase_( ) -> Optional[int]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = jnp.ones((batch_size, length)) / length
return scores
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = None
__a = 20
__a = self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE)
# tweak scores to not be uniform anymore
        __a = scores.at[1, 5].set((1 / length) + 0.1)  # peak, second batch (index 1)
        __a = scores.at[1, 10].set((1 / length) - 0.4)  # valley, second batch (index 1)
# compute softmax
__a = jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1)
__a = FlaxTemperatureLogitsWarper(temperature=0.5)
__a = FlaxTemperatureLogitsWarper(temperature=1.3)
__a = jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE) , axis=-1)
__a = jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = None
__a = 10
__a = 2
# create ramp distribution
__a = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE)[None, :] , (batch_size, vocab_size)).copy()
__a = ramp_logits[1:, : vocab_size // 2] + vocab_size
__a = FlaxTopKLogitsWarper(3)
__a = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
__a = 5
__a = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
__a = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE)[None, :] , (batch_size, length)).copy()
__a = top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = None
__a = 10
__a = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__a = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
__a = FlaxTopPLogitsWarper(0.8)
__a = np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__a = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
# check edge cases with negative and extreme logits
__a = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__a = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__a = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
__a = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = 20
__a = 4
__a = 0
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE)
# check that min length is applied at length 5
__a = ids_tensor((batch_size, 20) , vocab_size=20)
__a = 5
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''')])
# check that min length is not applied anymore at length 15
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = 15
__a = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE).any())
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = 20
__a = 4
__a = 0
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE)
# check that all scores are -inf except the bos_token_id score
__a = ids_tensor((batch_size, 1) , vocab_size=20)
__a = 1
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__a = 3
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE).any())
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = 20
__a = 4
__a = 0
__a = 5
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE)
# check that all scores are -inf except the eos_token_id when max_length is reached
__a = ids_tensor((batch_size, 4) , vocab_size=20)
__a = 4
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__a = 3
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE).any())
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = 4
__a = 10
__a = 15
__a = 2
__a = 1
__a = 15
# dummy input_ids and scores
__a = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE)
__a = input_ids.copy()
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scores.copy()
# instantiate all dist processors
__a = FlaxTemperatureLogitsWarper(temperature=0.5)
__a = FlaxTopKLogitsWarper(3)
__a = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE)
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE)
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE)
__a = 10
# no processor list
__a = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
# with processor list
__a = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
__a = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = 4
__a = 10
__a = 15
__a = 2
__a = 1
__a = 15
# dummy input_ids and scores
__a = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE)
__a = input_ids.copy()
__a = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scores.copy()
# instantiate all dist processors
__a = FlaxTemperatureLogitsWarper(temperature=0.5)
__a = FlaxTopKLogitsWarper(3)
__a = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
__a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE)
__a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE)
__a = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE)
__a = 10
# no processor list
def run_no_processor_list(__SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
__a = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
__a = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
return scores
# with processor list
def run_processor_list(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any]):
__a = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
__a = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE)
return scores
__a = jax.jit(__SCREAMING_SNAKE_CASE)
__a = jax.jit(__SCREAMING_SNAKE_CASE)
__a = jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
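# Minimal runnable sketches of what two of the warpers exercised above compute
# (illustrative re-implementations, not the library source):
import jax.numpy as jnp


def temperature_warp_sketch(scores, temperature):
    # temperature < 1 sharpens the post-softmax distribution, > 1 smooths it
    return scores / temperature


def top_k_warp_sketch(scores, k, filter_value=-jnp.inf):
    # keep the k largest logits per row and mask the rest
    # (ties at the threshold are kept, which is acceptable for a sketch)
    threshold = jnp.sort(scores, axis=-1)[:, -k][:, None]
    return jnp.where(scores < threshold, filter_value, scores)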
| 49 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=64, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=1, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Any = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : str = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Any = num_choices
_lowercase : Tuple = scope
_lowercase : Optional[Any] = q_groups
_lowercase : List[str] = k_groups
_lowercase : Optional[int] = v_groups
_lowercase : List[str] = post_attention_groups
_lowercase : Union[str, Any] = intermediate_groups
_lowercase : int = output_groups
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : Dict = None
_lowercase : int = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = SqueezeBertModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Dict = SqueezeBertForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : int = SqueezeBertForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : List[str] = SqueezeBertForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = self.num_choices
_lowercase : str = SqueezeBertForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Dict = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase_ : Optional[int] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : List[str] = True
lowercase_ : int = False
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = SqueezeBertModelTester(self)
_lowercase : Dict = ConfigTester(self, config_class=lowerCamelCase, dim=37)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = SqueezeBertModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
_lowercase : Optional[int] = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
_lowercase : List[str] = model(lowerCamelCase)[0]
_lowercase : Union[str, Any] = torch.Size((1, 3))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Tuple = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-4))
| 21 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
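

# Editor's note: a minimal sanity check of the helper above (illustrative
# values; with the default scale_factor=8 each side is padded up to the next
# multiple of 64 and then divided by 8 to get the latent grid size):
assert downscale_height_and_width(512, 512) == (64, 64)  # already divisible
assert downscale_height_and_width(760, 504) == (96, 64)  # rounded up first
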
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
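

# Editor's note: a small, self-contained sketch of the classifier-free
# guidance arithmetic used in ``__call__`` above. The tensors are random
# stand-ins for real unet outputs; ``guidance_scale=4.0`` matches the
# pipeline default. Wrapped in a function so importing this module stays
# side-effect free.
def _classifier_free_guidance_demo():
    guidance_scale = 4.0
    noise_pred_uncond = torch.randn(1, 4, 64, 64)
    noise_pred_text = torch.randn(1, 4, 64, 64)
    # move the prediction away from the unconditional output, toward the
    # text-conditioned one
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert guided.shape == noise_pred_uncond.shape
    return guided
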
| 50 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """Check tokenization of the 1b-lyrics checkpoint against reference ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        """Check tokenization of the 5b-lyrics checkpoint against reference ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
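

# Editor's note: for integer token-id tensors, ``torch.equal`` is a stricter
# (and arguably clearer) check than ``torch.allclose``, which is meant for
# floating point comparisons; a minimal illustration:
def _token_comparison_demo():
    import torch

    a = torch.tensor([[0, 0, 0, 1069, 11]])
    b = torch.tensor([[0, 0, 0, 1069, 11]])
    assert torch.equal(a, b)     # exact element-wise match
    assert torch.allclose(a, b)  # also true here, but tolerance-based
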
| 21 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model. So we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='moussaKam/mbarthez',
            revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6',
            sequences=sequences,
        )
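

# Editor's note: in the expected ids above, 0 and 2 appear to be the <s>/</s>
# special tokens and 1 is <pad> (consistent with the vocab checks earlier in
# this class). A hedged way to verify this by hand — it downloads the
# checkpoint, so it is guarded rather than run at import time:
if __name__ == '__main__':
    tok = BarthezTokenizer.from_pretrained('moussaKam/mbarthez')
    print(tok.convert_ids_to_tokens([0, 1, 2]))  # expected: ['<s>', '<pad>', '</s>']
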
| 51 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np')
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
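

# Editor's note: the assertions above all use the same max-absolute-difference
# tolerance pattern; a minimal standalone illustration with NumPy:
def _tolerance_check_demo():
    expected = np.array([0.50, 0.50, 0.50])
    actual = np.array([0.49, 0.51, 0.50])
    # passes: the worst per-element deviation is 0.01, below the 2e-2 bound
    assert np.abs(actual - expected).max() < 2e-2
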
| 21 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
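

# Editor's note: a quick check of the hidden-layer bookkeeping in the tester
# above, assuming its defaults block_sizes=[1, 1, 2] and num_decoder_layers=1:
# sum(block_sizes) + num_decoder_layers gives 5 layers, and FunnelModel reports
# two extra hidden states (input embeddings plus the upsampled encoder state),
# so expected_num_hidden_layers ends up at 7.
assert sum([1, 1, 2]) + 1 + 2 == 7
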
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
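

# Editor's note: a minimal illustration of the expand_dims + tile pattern used
# in the multiple-choice test above, which turns (batch, seq) tensors into
# (batch, num_choices, seq):
def _tile_for_multiple_choice_demo():
    ids = tf.constant([[1, 2, 3], [4, 5, 6]])           # (batch=2, seq=3)
    tiled = tf.tile(tf.expand_dims(ids, 1), (1, 4, 1))  # -> (2, 4, 3)
    assert tiled.shape == (2, 4, 3)
    return tiled
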
| 52 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image)

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type='np', image=init_image).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np')
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
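

# Editor's note: a quick check that the (760, 504) resize used above really is
# divisible by 8 but not by 16 (and hence not by 32), which is the shape case
# the test is exercising:
for _side in (760, 504):
    assert _side % 8 == 0 and _side % 16 != 0
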
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np')
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
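

# Editor's note: the fp16 fast test above converts each submodule with
# ``.half()``; a minimal illustration of what that does to parameter dtypes:
def _half_precision_demo():
    linear = torch.nn.Linear(4, 4)
    assert linear.weight.dtype == torch.float32
    linear = linear.half()
    assert linear.weight.dtype == torch.float16
    return linear
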
| 21 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
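# Usage sketch (illustrative, not part of the original module; the checkpoint
# name is an example and the call downloads weights): the auto classes above
# resolve a checkpoint's config type to the matching Flax class via the mappings
# defined in this module.
if __name__ == "__main__":
    model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
    print(type(model).__name__)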
| 53 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
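# Quick sketch (illustrative, not part of the original file) of how
# `attribute_map` above exposes the common Transformer attribute names:
if __name__ == "__main__":
    config = DeformableDetrConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8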
| 21 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
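# Background sketch (illustrative, assumes the `rouge_score` package that backs
# `calculate_rouge`): rougeLsum is computed per line, so scores depend on whether
# summaries are split with "\n" -- the behaviour the tests above probe.
def _rouge_lsum_newline_demo():
    from rouge_score import rouge_scorer

    scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
    target = "the cat sat .\nthe dog ran ."
    prediction = "the cat sat .\nthe dog ran ."
    # An exact, newline-separated match scores a perfect F-measure of 1.0.
    return scorer.score(target, prediction)["rougeLsum"].fmeasure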
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
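# Minimal sketch of the lazy-import idea used above (illustrative only; the real
# `_LazyModule` in `transformers.utils` is more involved): attribute access
# triggers the actual submodule import on first use.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {"configuration_speech_to_text": ["Speech2TextConfig", ...]} to class -> module
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)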
| 21 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used to instantiate a training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
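# Example invocation of the command defined above (illustrative; the csv path
# and output directory are assumptions):
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --column_label 0 --column_text 1 --column_id 2 \
#       --task text_classification --model bert-base-uncased \
#       --train_batch_size 32 --learning_rate 3e-5 \
#       --output ./trained_model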
| 55 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False  # initialized up front so pure-numpy inputs don't hit an undefined name below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain lerp
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
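def _slerp_demo():
    # Illustrative check (not part of the original pipeline): slerp between two
    # orthogonal unit vectors at t=0.5 stays on the unit sphere, unlike lerp.
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    assert np.allclose(np.linalg.norm(mid), 1.0)
    return mid  # approximately [0.7071, 0.7071]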
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)

        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image,
        content_image,
        style_prompt=None,
        content_prompt=None,
        height=512,
        width=512,
        noise_strength=0.6,
        num_inference_steps=50,
        guidance_scale=7.5,
        batch_size=1,
        eta=0.0,
        clip_guidance_scale=100,
        generator=None,
        output_type="pil",
        return_dict=True,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
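# End-user sketch (illustrative; component checkpoints and the CoCa helpers are
# assumptions -- any compatible SD weights plus an OpenCLIP CoCa model can be used):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae=vae, text_encoder=text_encoder, clip_model=clip_model,
#       tokenizer=tokenizer, unet=unet, scheduler=scheduler,
#       feature_extractor=feature_extractor, coca_model=coca_model,
#       coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
#   ).to("cuda")
#   out = pipe(style_image=style_img, content_image=content_img,
#              num_inference_steps=50, clip_guidance_scale=100)
#   out.images[0].save("mixed.png")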
| 21 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
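# Quick illustration (not part of the tokenizer): the token-type ids produced by
# `create_token_type_ids_from_sequences` mark [CLS] A [SEP] with 0s and B [SEP] with 1s.
def _segment_ids_demo():
    token_ids_0 = [7, 8, 9]  # sequence A (hypothetical ids)
    token_ids_1 = [10, 11]   # sequence B (hypothetical ids)
    zeros = (len(token_ids_0) + 2) * [0]  # covers [CLS] + A + [SEP]
    ones = (len(token_ids_1) + 1) * [1]   # covers B + [SEP]
    return zeros + ones                   # -> [0, 0, 0, 0, 0, 1, 1, 1]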
| 56 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet")
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet_class_cond")
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 21 | 0 |
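# A minimal standalone sketch of the fixed-latent trick the tests above rely on:
# seeding a torch.Generator on the target device makes every draw reproducible,
# so repeated runs can be compared against hard-coded expected slices. Shape,
# seed and dtype below are illustrative assumptions, not values from the tests.
import torch
def fixed_latents(seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn(shape, generator=generator, device=device, dtype=dtype)
assert torch.equal(fixed_latents(seed=0), fixed_latents(seed=0))  # same seed, same tensor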
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : int
__UpperCAmelCase : Node | None =None
__UpperCAmelCase : Node | None =None
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Node(1 )
__lowerCAmelCase = Node(2 )
__lowerCAmelCase = Node(3 )
__lowerCAmelCase = Node(4 )
__lowerCAmelCase = Node(5 )
return tree
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
if root is None:
return output
__lowerCAmelCase = deque([root] )
while process_queue:
__lowerCAmelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
def populate_output(_UpperCamelCase , _UpperCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
def populate_output(_UpperCamelCase , _UpperCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if root is None:
return []
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = height(_UpperCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_UpperCamelCase , _UpperCamelCase ) )
__lowerCAmelCase = 1
else:
output.append(get_nodes_from_right_to_left(_UpperCamelCase , _UpperCamelCase ) )
__lowerCAmelCase = 0
return output
def _lowerCamelCase ( ): # Main function for testing.
'''simple docstring'''
__lowerCAmelCase = make_tree()
print(f"In-order Traversal: {inorder(_UpperCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_UpperCamelCase )}" )
print(f"Post-order Traversal: {postorder(_UpperCamelCase )}" , "\n" )
print(f"Height of Tree: {height(_UpperCamelCase )}" , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(_UpperCamelCase ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(_UpperCamelCase ) + 1 ):
print(f"Level {level}:" , get_nodes_from_left_to_right(_UpperCamelCase , level=_UpperCamelCase ) )
print("\nZigZag order Traversal: " )
print(zigzag(_UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
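# A compact runnable sketch of the traversals above, assuming the five-node
# sample tree is 1 at the root with children 2 and 3, and 4/5 under 2 (the
# tree shape is an assumption about what make_tree is meant to build).
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: "TreeNode | None" = None
    right: "TreeNode | None" = None
root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
def inorder_(n):
    return [*inorder_(n.left), n.data, *inorder_(n.right)] if n else []
def height_(n):
    return 1 + max(height_(n.left), height_(n.right)) if n else 0
assert inorder_(root) == [4, 2, 5, 1, 3]
assert height_(root) == 3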
| 57 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[int, int]:
_lowercase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_lowercase : int = x_den * y_den * z_den
_lowercase : int = gcd(lowerCamelCase_ , lowerCamelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
def UpperCamelCase_( lowerCamelCase_ = 35 ) -> int:
_lowercase : set = set()
_lowercase : int
_lowercase : Fraction = Fraction(0 )
_lowercase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_lowercase : int = x_num * y_den + x_den * y_num
_lowercase : int = x_den * y_den
_lowercase : str = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : List[Any] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=2
_lowercase : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_lowercase : List[Any] = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : int = int(sqrt(lowerCamelCase_ ) )
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Optional[int] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-1
_lowercase : Any = x_num * y_num
_lowercase : str = x_den * y_num + x_num * y_den
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : int = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
                    # n=-2
_lowercase : str = x_num * x_num * y_num * y_num
_lowercase : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : List[str] = int(sqrt(lowerCamelCase_ ) )
_lowercase : Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Tuple = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , **A ) -> Any:
super().__init__(**A )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(A )
def snake_case_( self , **A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , A , *A , A=None , A=None , **A ) -> List[Any]:
return super().__call__(A , *A , num_workers=A , batch_size=A , **A )
def snake_case_( self , A , A=64 , A = 0 , A = 512 / 1500 , A = 32 , A = 1 , ) -> Tuple:
_SCREAMING_SNAKE_CASE = load_image(A )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
A , A , A , A , A , A )
_SCREAMING_SNAKE_CASE = self.image_processor(images=A , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(A , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , A , A ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_( self , A , A=0.88 , A=0.95 , A=0 , A=1 , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**A )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
A , A , A , A , binarize=A )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , A , A , A , A , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_( self , A , A=False , A=False , A=0.7 , ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(A )
_SCREAMING_SNAKE_CASE = torch.cat(A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
A , A , A , A )
_SCREAMING_SNAKE_CASE = defaultdict(A )
for output in model_outputs:
for k, v in output.items():
extra[k].append(A )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 58 |
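# The preprocess step above slices the prompt grid into chunks of
# points_per_batch so the SAM head never scores more point prompts than fit
# in one forward pass. A minimal sketch of that slicing over a dummy grid
# (the tensor shape and chunk size are assumptions for illustration):
import torch
grid_points = torch.rand(1, 10, 1, 2)  # (batch, n_points, points_per_mask, xy)
points_per_batch = 4
n_points = grid_points.shape[1]
for i in range(0, n_points, points_per_batch):
    chunk = grid_points[:, i : i + points_per_batch]
    is_last = i + points_per_batch >= n_points
    print(chunk.shape, "last batch" if is_last else "")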
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
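# The guarded imports above let the package import cleanly even when an
# optional backend (sentencepiece, tokenizers, torch) is missing. A
# dependency-free sketch of the same availability check using importlib:
import importlib.util
def backend_available(name):
    # find_spec returns None when the package cannot be located
    return importlib.util.find_spec(name) is not None
if backend_available("tokenizers"):
    pass  # safe to expose the fast tokenizer here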
| 21 | 0 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase :
def __init__(self : str , snake_case__ : int ) -> None:
'''simple docstring'''
snake_case : str = num_of_nodes
snake_case : list[list[int]] = []
snake_case : dict[int, int] = {}
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : int ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
snake_case : Union[str, Any] = self.find_component(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : list[int] , snake_case__ : int , snake_case__ : int ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
snake_case : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case__ )
elif component_size[u_node] >= component_size[v_node]:
snake_case : List[Any] = self.find_component(snake_case__ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> None:
'''simple docstring'''
snake_case : Dict = []
snake_case : Any = 0
snake_case : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
snake_case : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
snake_case , snake_case , snake_case : Dict = edge
snake_case : List[str] = self.m_component[u]
snake_case : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
snake_case : Optional[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case__ , snake_case__ ):
snake_case , snake_case , snake_case : List[str] = edge
snake_case : Tuple = self.m_component[u]
snake_case : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case__ , snake_case__ , snake_case__ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
snake_case : Any = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
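# Independent cross-check for the Boruvka class above: Kruskal's algorithm on
# a small 4-node graph must report the same minimum spanning tree weight
# (edges (0,1,1), (1,2,2), (2,3,3) are taken, so the total is 6).
def kruskal_weight(num_nodes, edges):
    parent = list(range(num_nodes))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    total = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            total += w
    return total
assert kruskal_weight(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]) == 6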
| 59 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
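# Quick sanity checks for the rule above: a polygon can close only when its
# longest side is strictly shorter than the sum of all the other sides.
valid_sides = [3, 4, 5]        # 5 < 3 + 4, a proper triangle
degenerate_sides = [1, 1, 2]   # 2 == 1 + 1, collapses onto a line
assert max(valid_sides) < sum(valid_sides) - max(valid_sides)
assert not (max(degenerate_sides) < sum(degenerate_sides) - max(degenerate_sides))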
| 21 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : list , _snake_case : list ):
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(_snake_case , _snake_case ) ) )
def _snake_case ( _snake_case : list[float] ):
if point:
if isinstance(_snake_case , _snake_case ):
for item in point:
if not isinstance(_snake_case , (int, float) ):
lowerCAmelCase : Union[str, Any] = (
'''Expected a list of numbers as input, found '''
f'''{type(_snake_case ).__name__}'''
)
raise TypeError(_snake_case )
else:
lowerCAmelCase : List[str] = f'''Expected a list of numbers as input, found {type(_snake_case ).__name__}'''
raise TypeError(_snake_case )
else:
raise ValueError('''Missing an input''' )
def _snake_case ( _snake_case : list , _snake_case : list ):
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(_snake_case , _snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
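# Worked example for the metric above: between (1, 1) and (4, 5) the
# Manhattan distance is |1 - 4| + |1 - 5| = 3 + 4 = 7.
point_a, point_b = [1, 1], [4, 5]
assert sum(abs(a - b) for a, b in zip(point_a, point_b)) == 7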
| 60 |
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCamelCase_( lowerCamelCase_ = 200_0000 ) -> int:
_lowercase : list[int] = [0]
_lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
_lowercase : int = 0
# an estimate of b, using the quadratic formula
_lowercase : float
# the largest integer less than b_estimate
_lowercase : int
    # the smallest integer greater than b_estimate
_lowercase : int
# the triangle number corresponding to b_floor
_lowercase : int
# the triangle number corresponding to b_ceil
_lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_lowercase : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_lowercase : List[str] = floor(lowerCamelCase_ )
_lowercase : Dict = ceil(lowerCamelCase_ )
_lowercase : List[str] = triangle_numbers[b_floor]
_lowercase : List[str] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Union[str, Any] = triangle_b_first_guess * triangle_a
_lowercase : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Any = triangle_b_second_guess * triangle_a
_lowercase : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
_a = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase_ : dict[str, str | None] = {}
UpperCAmelCase_ : Optional[Any] = source_vertex
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = {self.source_vertex}
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase_ : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowercase_ )
UpperCAmelCase_ : Dict = vertex
queue.append(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase_ : List[str] = self.parent.get(lowercase_ )
if target_vertex_parent is None:
UpperCAmelCase_ : str = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowercase_ )
return self.shortest_path(lowercase_ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
_a = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
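# Tracing the example graph by hand: breadth-first search from G reaches C,
# then A, then B, then D, so the path printed for D should be G->C->A->B->D.
# A minimal parent-map BFS over the same adjacency dict confirms it:
example_graph = {
    "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
    "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"],
}
parents = {"G": None}
frontier = ["G"]
while frontier:
    vertex = frontier.pop(0)
    for neighbour in example_graph[vertex]:
        if neighbour not in parents:
            parents[neighbour] = vertex
            frontier.append(neighbour)
path, node = [], "D"
while node is not None:
    path.append(node)
    node = parents[node]
assert path[::-1] == ["G", "C", "A", "B", "D"]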
| 61 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _lowerCamelCase:
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = np.abs((a - b)).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Any = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase)
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : Tuple = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : str = after_output[0]
_lowercase : Optional[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : List[Any] = 13
_lowercase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModelTester(self)
_lowercase : Any = FlaxBertModelTester(self)
_lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
_lowercase : Any = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : Tuple = 13
_lowercase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Any = random_attention_mask([batch_size, 4])
_lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = FlaxCLIPVisionModelTester(self)
_lowercase : Union[str, Any] = FlaxBertModelTester(self)
_lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
_lowercase : str = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
_lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_lowercase : List[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
| 21 | 0 |
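# Every cross-framework check above reduces to one numeric pattern: take the
# max absolute difference between the two frameworks' outputs and bound it by
# a tolerance. A tiny standalone version (the tolerance value is illustrative):
import numpy as np
def assert_close(a, b, tol=4e-2):
    diff = np.abs(a - b).max()
    assert diff <= tol, f"max abs diff {diff} exceeds {tol}"
assert_close(np.array([1.0, 2.0]), np.array([1.0, 2.001]))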
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
__UpperCamelCase =re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , SCREAMING_SNAKE_CASE__ )
if matches:
__UpperCamelCase =float(matches[1] )
__UpperCamelCase =int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCamelCase =10_01
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ) + 1: v for k, v in idalabel.items()}
__UpperCamelCase ='background'
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int=False ):
__UpperCamelCase =get_mobilenet_va_config(SCREAMING_SNAKE_CASE__ )
# Load 🤗 model
__UpperCamelCase =MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCamelCase =MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' )
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
__UpperCamelCase =torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCamelCase =torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
__UpperCamelCase =None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('Pushing to the hub...' )
__UpperCamelCase ='google/' + model_name
image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 62 |
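# The converter above recovers the depth multiplier and input resolution by
# parsing the checkpoint name. A quick check of that regex on the default name:
import re
matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_1.0_224")
assert matches is not None
assert float(matches[1]) == 1.0 and int(matches[2]) == 224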
import random
from typing import Any
def UpperCamelCase_( lowerCamelCase_ ) -> list[Any]:
for _ in range(len(lowerCamelCase_ ) ):
_lowercase : Optional[int] = random.randint(0 , len(lowerCamelCase_ ) - 1 )
_lowercase : str = random.randint(0 , len(lowerCamelCase_ ) - 1 )
_lowercase , _lowercase : Optional[int] = data[b], data[a]
return data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE : int = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 21 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase_ : int = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Union[str, Any] = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
lowerCAmelCase_ : List[str] = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
lowerCAmelCase_ : Optional[int] = 0
for log in Path().glob('*.log'):
lowerCAmelCase_ : Any = 0
with open(log, 'r') as f:
for line in f:
lowerCAmelCase_ : Optional[int] = json.loads(line)
if line.get('nodeid', '') != "":
lowerCAmelCase_ : Any = line['nodeid']
if line.get('duration', None) is not None:
lowerCAmelCase_ : str = f"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCAmelCase_ : Union[str, Any] = []
log.unlink()
lowerCAmelCase_ : Any = ''
lowerCAmelCase_ : Any = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : Any = {}
for test in failed_tests:
lowerCAmelCase_ : Optional[Any] = test[0].split('::')
lowerCAmelCase_ : Union[str, Any] = data[0].split('/')[-1]
if data[0] not in filesafailed:
lowerCAmelCase_ : Optional[int] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase_ : Optional[Any] = [test[0] for test in failed_table]
lowerCAmelCase_ : List[Any] = list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase_ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase_ : int = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
lowerCAmelCase_ : int = 'Too many failed tests, please see the full report in the Action results.'
lowerCAmelCase_ : Dict = len(err) + 10
lowerCAmelCase_ : List[str] = message[: 30_00 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
lowerCAmelCase_ : List[Any] = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
lowerCAmelCase_ : Optional[Any] = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
lowerCAmelCase_ : int = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
lowerCAmelCase_ : Optional[Any] = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
lowerCAmelCase_ : List[str] = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCAmelCase_ : Tuple = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
lowerCAmelCase_ : Dict = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCAmelCase_ : List[str] = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase_ : Dict = row[0]
else:
lowerCAmelCase_ : Any = ''
lowerCAmelCase_ : Dict = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
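# The report above renders its failure tables through tabulate with a custom
# pipe-style TableFormat. The same table previews locally with the built-in
# "pipe" format (the file names below are made up for illustration):
from tabulate import tabulate
preview = [["tests/test_big_modeling.py", 2], ["tests/test_hooks.py", 1]]
print(tabulate(preview, headers=["Test Location", "Num Failed"], tablefmt="pipe"))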
| 63 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'width_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ : Dict = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ : List[Any] = False
lowercase_ : Optional[int] = False
lowercase_ : List[Any] = False
lowercase_ : Tuple = False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = MobileViTVaModelTester(self)
_lowercase : Tuple = MobileViTVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(lowerCamelCase)
_lowercase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : List[Any] = outputs.hidden_states
_lowercase : Tuple = 5
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase : Optional[int] = 2
for i in range(len(lowerCamelCase)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2)
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Tuple = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = MobileViTVaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> Dict:
_lowercase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
lowerCamelCase)
_lowercase : Dict = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**lowerCamelCase)
# verify the logits
_lowercase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Optional[int] = model.to(lowerCamelCase)
_lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Tuple = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**lowerCamelCase)
_lowercase : str = outputs.logits
# verify the logits
_lowercase : Tuple = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Tuple = model.to(lowerCamelCase)
_lowercase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : int = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Union[str, Any] = model(**lowerCamelCase)
_lowercase : Any = outputs.logits.detach().cpu()
_lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase, target_sizes=[(50, 60)])
_lowercase : Any = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
_lowercase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase)
_lowercase : Optional[int] = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
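

# Note (illustrative, not part of the original tests): post_process_semantic_segmentation
# returns one (H, W) label map per image. With `target_sizes` the logits are resized to the
# requested size first; otherwise the model's native output resolution (here 32x32) is kept.
#
#     segmentation = image_processor.post_process_semantic_segmentation(
#         outputs=outputs, target_sizes=[(50, 60)]
#     )
#     segmentation[0].shape  # torch.Size([50, 60])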
| 21 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
    index_path (`str`, *optional*):
        The path to the serialized faiss index on disk.
    passages_path (`str`, *optional*):
        A path to text passages compatible with the faiss index. Required if using
        [`~models.rag.retrieval_rag.LegacyIndex`].
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
        Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`RagConfig`] from a pre-trained question encoder and generator configuration.
        """
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
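

# Illustrative usage (a sketch, not part of the original module): a composed RAG
# config is typically built from two sub-configs via the classmethod defined above.
# The checkpoint names below are examples, not taken from this file.
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder_config, generator_config, n_docs=5
#     )
#     print(rag_config.n_docs)  # 5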
| 64 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0'
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat',
            dtype='float32',
            mode='r',
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source='wiki40b', method='dense', n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='english_wiki40b_snippets_100w', n_results=n_results,
            )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    return (answer, support_list)
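

# Note on the cache decorator above (explanatory sketch, not original code):
# st.cache cannot hash torch tensors or tokenizers, so `hash_funcs` maps those
# types to a constant, effectively excluding them from the cache key. The same
# idea in isolation:
#
#     @st.cache(hash_funcs={torch.Tensor: lambda _: None})
#     def embed(model, batch):
#         with torch.no_grad():
#             return model(**batch)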
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 21 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
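

# Illustrative usage (a sketch, not part of the original module): the default
# arguments reproduce the MiT-b0 encoder layout.
#
#     from transformers import SegformerConfig
#
#     config = SegformerConfig()
#     print(config.num_encoder_blocks)  # 4
#     print(config.hidden_sizes)        # [32, 64, 160, 256]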
| 65 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `'tf'`: Return TensorFlow `tf.constant` objects.\n            - `'pt'`: Return PyTorch `torch.Tensor` objects.\n            - `'np'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer's default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    "


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
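

# Illustrative usage (a sketch, not part of the original module): encode one
# question against two retrieved passages; the output has one row per passage.
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is the capital of France?",
#         titles=["Paris", "France"],
#         texts=["Paris is the capital of France.", "France is a country in Europe."],
#         padding=True,
#         return_tensors="pt",
#     )
#     print(encoded["input_ids"].shape)  # (n_passages, sequence_length)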
| 21 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
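

# Illustrative usage outside the test suite (a sketch; assumes `optimum` is
# installed, which provides the BetterTransformer integration):
#
#     from transformers import AutoModelForSeq2SeqLM
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     # ... run inference ...
#     model = model.reverse_bettertransformer()  # restore canonical weights
#     model.save_pretrained("my-model")          # only valid after reversing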
| 66 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
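

if __name__ == "__main__":
    # Illustrative checks, not part of the original file: for [2, 3, -2, 4] the
    # best subarray is [2, 3] with product 6; for [-2, 0, -1] it is [0].
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0
    print("max_product_subarray: all checks passed")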
| 21 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''', '''embeddings.mask_token''')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''', '''embeddings.norm''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')

    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''

    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""")
        model.push_to_hub(f"""microsoft/{model_name}""")
        image_processor.push_to_hub(f"""microsoft/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
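
# Example invocation (illustrative; the script file name and local paths below are
# placeholders, not taken from this file):
#
#     python convert_swin_simmim_to_pytorch.py \
#         --model_name swin-base-simmim-window6-192 \
#         --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
#         --pytorch_dump_folder_path ./swin-base-simmim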
| 67 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order at the head, yielding an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
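
# Note (illustrative, not part of the original file): merge_lists re-sorts the
# concatenated values, which costs O((m + n) log(m + n)). Because both inputs are
# already sorted, a linear-time alternative is the classic two-pointer merge:
#
#     def merge_values(a: list[int], b: list[int]) -> list[int]:
#         out, i, j = [], 0, 0
#         while i < len(a) and j < len(b):
#             if a[i] <= b[j]:
#                 out.append(a[i])
#                 i += 1
#             else:
#                 out.append(b[j])
#                 j += 1
#         return out + a[i:] + b[j:]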
| 21 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
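

# Illustrative usage (a sketch, not part of the original module): preprocess one
# dummy RGB image with the default CLIP settings (resize -> center crop ->
# rescale -> normalize), yielding a (1, 3, 224, 224) batch.
#
#     import numpy as np
#
#     image_processor = CLIPImageProcessor()
#     dummy_image = np.zeros((300, 400, 3), dtype=np.uint8)
#     batch = image_processor(images=dummy_image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)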
| 68 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Union[str, Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
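# End-to-end sketch of the two-stage flow the tests above exercise (prior ->
# img2img). Checkpoints and prompt come from the slow test; the CUDA device and
# fp16 dtype are assumptions for a machine with a GPU, so this is guarded.
if __name__ == '__main__':
    prior = KandinskyPriorPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16).to('cuda')
    pipe = KandinskyImg2ImgPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16).to('cuda')
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png')
    prompt = 'A red cartoon frog, 4k'
    image_embeds, negative_image_embeds = prior(prompt).to_tuple()
    frog = pipe(prompt, image=init_image, image_embeds=image_embeds,
                negative_image_embeds=negative_image_embeds, strength=0.2).images[0]
    frog.save('frog.png')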
| 21 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision='no', save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.')
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file', default=default_json_config_file, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), dest='save_location', )
    parser.add_argument(
        '--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no', )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f'accelerate configuration saved at {config_file}')
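# Usage sketch: the same helper also ships as `accelerate.utils.write_basic_config`,
# so a default config can be produced programmatically; the save path below is an
# illustrative assumption.
#
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision='fp16', save_location='/tmp/accelerate_config.json')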
| 69 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
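# Usage sketch via the high-level factory; the ViT checkpoint is an illustrative
# choice and 'cats.png' stands in for any local image path or URL.
if __name__ == '__main__':
    from transformers import pipeline

    classifier = pipeline('image-classification', model='google/vit-base-patch16-224')
    print(classifier('cats.png', top_k=3))  # [{'score': ..., 'label': ...}, ...]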
| 21 | 0 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["""esc"""])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
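# Interactive sketch (not called on import): read keys with get_character() and
# report arrow presses using only the KEYMAP entries defined above. Run it in a
# real terminal; the 'q'-to-quit convention is an assumption of this demo.
def _demo():
    print('Press a key (q to quit):')
    while True:
        key = get_character()
        if key == 'q':
            break
        if isinstance(key, str) and key and ord(key[0]) in (
                KEYMAP['up'], KEYMAP['down'], KEYMAP['left'], KEYMAP['right']):
            print('arrow key')
        else:
            print(repr(key))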
| 70 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
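# Worked check of the closed form: for first_term=1, common_diff=1,
# num_of_terms=10 it gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0,
# i.e. 1 + 2 + ... + 10.
assert sum_of_series(1, 1, 10) == 55.0
assert sum_of_series(1, 10, 100) == (100 / 2) * (2 + 99 * 10)  # 49600.0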
| 21 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, [eos, src_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language setting: no prefix, [eos, tgt_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
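# Translation-prep sketch mirroring the language-code machinery above. The
# checkpoint and the English/Romanian pair follow the public MBart docs; exact
# token ids vary by tokenizer version, so this is guarded and illustrative.
if __name__ == '__main__':
    tokenizer = MBartTokenizerFast.from_pretrained(
        'facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
    batch = tokenizer('UN Chief Says There Is No Military Solution in Syria',
                      text_target='Şeful ONU declară că nu există o soluţie militară în Siria',
                      return_tensors='pt')
    print(batch['input_ids'])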
| 71 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )

    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCamelCase( unittest.TestCase ):
    @slow
    def test_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
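# Inference sketch matching the MNLI integration test above; the premise /
# hypothesis pair is illustrative, and label names come from model.config.id2label.
if __name__ == '__main__':
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained('squeezebert/squeezebert-mnli')
    model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
    enc = tok('A soccer game with multiple males playing.',
              'Some men are playing a sport.', return_tensors='pt')
    pred = model(**enc).logits.argmax(-1).item()
    print(model.config.id2label[pred])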
| 21 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=True, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."})
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features['label'].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features['label'].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features['label'].names
    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'], examples['hypothesis'], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on train dataset', )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''')
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on validation dataset', )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on prediction dataset', )
    # Get the metric function
    metric = evaluate.load('xnli')

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''')
if __name__ == "__main__":
main()
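# The per-example tokenization above boils down to a paired-text call; a
# standalone sketch (the multilingual checkpoint is an illustrative choice):
#
#   tok = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
#   enc = tok('The premise sentence.', 'The hypothesis sentence.',
#             padding='max_length', max_length=128, truncation=True)
#   assert len(enc['input_ids']) == 128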
| 72 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
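# Usage sketch: the tokenizer consumes the metadata dict above and returns one
# token tensor per prior level (three for the 1b-lyrics checkpoint):
#
#   tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
#   tokens = tokenizer(artist='Zac Brown Band', genres='Country', lyrics='...')['input_ids']
#   assert len(tokens) == 3  # one (1, seq_len) tensor per level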
| 21 | 0 |
from math import ceil
def solution(n: int = 1001) -> int:
__lowerCamelCase : Tuple = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__lowerCamelCase : Any = 2 * i + 1
__lowerCamelCase : Tuple = 2 * i
__lowerCamelCase : Tuple = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number""")
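# Cross-check of the closed form used in solution(): ring i (1-indexed) of the
# number spiral has corners c, c - 2i, c - 4i, c - 6i with c = (2i + 1)**2,
# which sum to 4 * (2i + 1)**2 - 12i -- exactly the per-iteration update above.
def diagonal_sum_by_rings(n: int) -> int:
    total = 1
    for i in range(1, n // 2 + 1):
        corner = (2 * i + 1) ** 2
        total += 4 * corner - 12 * i
    return total

assert diagonal_sum_by_rings(5) == solution(5) == 101  # known 5x5 spiral total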
| 73 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
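

# A small helper distilling the slice-comparison pattern used in the tests
# above (hypothetical utility, not part of the original test file):
def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-1) -> None:
    # compare the bottom-right 3x3 patch of the last channel against a reference
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected_slice.flatten()).max() < tol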
| 21 | 0 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
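

# Optional sanity check (illustrative sketch, not part of the original script):
# reload the converted checkpoint and run one masked-LM forward pass.
def sanity_check_converted_model(pytorch_dump_folder_path: str) -> None:
    tokenizer = AutoTokenizer.from_pretrained(pytorch_dump_folder_path)
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(pytorch_dump_folder_path)
    inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # position of the <mask> token in the input
    mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
    print(tokenizer.decode([logits[0, mask_index].argmax().item()]))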
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 21 | 0 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the one-hundred 50-digit numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
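

# A common shortcut for this problem (sketch under the assumption that carries
# from the truncated tails cannot disturb the leading ten digits): summing only
# the first 12 digits of each 50-digit line already yields the right prefix,
# since 100 discarded tails contribute less than 100 * 10**38 to a sum whose
# leading ten digits sit well above the 10**40 mark.
def solution_truncated() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line[:12]) for line in file_hand))[:10]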
if __name__ == "__main__":
print(solution())
| 75 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
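

# Illustrative usage of the config defined above (assumed, not from the
# original file): a two-stage variant with box refinement and the default
# ResNet backbone config instead of a timm backbone.
#
#   config = DeformableDetrConfig(use_timm_backbone=False, two_stage=True, with_box_refine=True)
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"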
| 21 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
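

# Quick illustration of the helper above (hypothetical call, mirroring how it
# is used in convert_detr_checkpoint below):
#
#   config, is_panoptic = get_detr_config("detr-resnet-50-panoptic")
#   assert is_panoptic and config.num_labels == 250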
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
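

# Why the 256-wide slices above are correct: torch.nn.MultiheadAttention stores
# the query/key/value projections as one fused (3*d_model, d_model) weight and
# one (3*d_model,) bias, stacked in q, k, v order. A minimal standalone sketch
# of the same split (d_model=256 matches DETR's hidden size):
def split_fused_qkv(in_proj_weight, in_proj_bias, d_model=256):
    q_w, q_b = in_proj_weight[:d_model, :], in_proj_bias[:d_model]
    k_w, k_b = in_proj_weight[d_model : 2 * d_model, :], in_proj_bias[d_model : 2 * d_model]
    v_w, v_b = in_proj_weight[-d_model:, :], in_proj_bias[-d_model:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)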
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 76 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
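
# Note on the pattern above: importing this package stays cheap because the
# heavy torch/TF modules are only resolved by _LazyModule on first attribute
# access. Illustrative use through the public transformers API:
#
#   from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration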
| 21 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = '<s>'
lowercase__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(a ) , 1_0_0_0 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def _UpperCAmelCase ( self ) -> int:
if not self.test_rust_tokenizer:
return
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : Dict = self.get_rust_tokenizer()
lowercase__ : List[Any] = 'I was born in 92000, and this is falsé.'
lowercase__ : Dict = tokenizer.tokenize(a )
lowercase__ : Tuple = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ : str = tokenizer.encode(a , add_special_tokens=a )
lowercase__ : Tuple = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ : Any = self.get_rust_tokenizer()
lowercase__ : Optional[int] = tokenizer.encode(a )
lowercase__ : Tuple = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _UpperCAmelCase ( self , a=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
lowercase__ : Tuple = 'This is a simple input'
lowercase__ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Optional[int] = ('This is a simple input', 'This is a pair')
lowercase__ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = ReformerTokenizer(a , keep_accents=a )
lowercase__ : str = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowercase__ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ : int = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = 'Hello World!'
lowercase__ : Optional[int] = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowercase__ : int = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowercase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowercase__ : List[Any] = ' '.join(a )
lowercase__ : List[Any] = self.big_tokenizer.encode_plus(a , return_tensors='pt' )
lowercase__ : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' )
lowercase__ : Optional[int] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowercase__ : Dict = encoded_sequence['input_ids'].shape
lowercase__ : Dict = ReformerModel(a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
# fmt: off
lowercase__ : Optional[Any] = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowercase__ : List[Any] = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=a , sequences=a , )
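

# For reference, the slow tests above pin down the public checkpoint's
# behaviour; the "Hello World!" ids can be reproduced standalone with the real
# transformers API:
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]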
| 77 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = not isinstance(v0, np.ndarray)
    if inputs_are_torch:
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
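

# Sanity properties of slerp (illustrative): t=0 returns v0, t=1 returns v1,
# and nearly parallel inputs fall back to plain linear interpolation.
#
#   v0, v1 = torch.randn(4), torch.randn(4)
#   assert torch.allclose(slerp(0.0, v0, v1), v0, atol=1e-5)
#   assert torch.allclose(slerp(1.0, v0, v1), v1, atol=1e-5)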
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        clip_model,
        tokenizer,
        unet,
        scheduler,
        feature_extractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
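
    # Worked example of the method above: with num_inference_steps=50 and
    # strength=0.6, init_timestep = min(int(50 * 0.6), 50) = 30, so denoising
    # starts at t_start = 20 and only the last 30 scheduler timesteps are run.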
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = latents.detach().requires_grad_()
_lowercase : Union[str, Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Tuple = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowercase : Any = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : List[str] = torch.sqrt(lowerCamelCase)
_lowercase : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, lowerCamelCase):
_lowercase : Dict = self.scheduler.sigmas[index]
_lowercase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Dict = 1 / 0.1_8_2_1_5 * sample
_lowercase : Optional[Any] = self.vae.decode(lowerCamelCase).sample
_lowercase : int = (image / 2 + 0.5).clamp(0, 1)
_lowercase : Any = transforms.Resize(self.feature_extractor_size)(lowerCamelCase)
_lowercase : Optional[Any] = self.normalize(lowerCamelCase).to(latents.dtype)
_lowercase : List[str] = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : Optional[Any] = spherical_dist_loss(lowerCamelCase, lowerCamelCase).mean() * clip_guidance_scale
_lowercase : str = -torch.autograd.grad(lowerCamelCase, lowerCamelCase)[0]
if isinstance(self.scheduler, lowerCamelCase):
_lowercase : Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowercase : List[str] = noise_pred_original
else:
_lowercase : List[Any] = noise_pred_original - torch.sqrt(lowerCamelCase) * grads
return noise_pred, latents
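# Summary of the guidance step above: decode the predicted x_0 estimate, embed it with
# CLIP, and correct noise_pred (or the latents, for sigma-based schedulers) using the
# gradient of the spherical distance loss with respect to the latents.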
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 0.6, lowerCamelCase = 50, lowerCamelCase = 7.5, lowerCamelCase = 1, lowerCamelCase = 0.0, lowerCamelCase = 1_00, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, lowerCamelCase = 0.8, lowerCamelCase = 0.1, lowerCamelCase = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to the correct device beforehand
# (assign the result back, since Tensor.to returns a new tensor)
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Dict = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
_lowercase : Dict = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Dict = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Optional[Any] = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : List[Any] = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Any = 1 / 0.1_8_2_1_5 * latents
_lowercase : List[str] = self.vae.decode(lowerCamelCase).sample
_lowercase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
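# Minimal usage sketch for the __call__ above (illustrative only: the custom pipeline
# name and checkpoint id are assumptions, not taken from this file):
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#         clip_model=clip_model, feature_extractor=feature_extractor,
#         coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
#     )
#     out = pipe(content_image, style_image, num_inference_steps=50, clip_guidance_scale=100)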
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = [0] * no_of_processes
UpperCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowercase_ ):
UpperCAmelCase = burst_time[i]
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 999999999
UpperCAmelCase = 0
UpperCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowercase_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCAmelCase = remaining_time[j]
UpperCAmelCase = j
UpperCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCAmelCase = remaining_time[short]
if minm == 0:
UpperCAmelCase = 999999999
if remaining_time[short] == 0:
complete += 1
UpperCAmelCase = False
# Find finish time of current process
UpperCAmelCase = increment_time + 1
# Calculate waiting time
UpperCAmelCase = finish_time - arrival_time[short]
UpperCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
UpperCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
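# Worked trace of the loop above (hand-computed, not from the source):
# arrival_time=[0, 1, 2], burst_time=[3, 1, 2], no_of_processes=3
#   t=0: P0 runs; t=1: P1 preempts (remaining 1 < 2) and finishes at t=2
#   t=2..3: P0 resumes and finishes at t=4; t=4..5: P2 runs and finishes at t=6
# waiting_time = [4-0-3, 2-1-1, 6-2-2] = [1, 0, 2]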
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = [0] * no_of_processes
for i in range(lowercase_ ):
UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = 0
UpperCAmelCase = 0
for i in range(lowercase_ ):
UpperCAmelCase = total_waiting_time + waiting_time[i]
UpperCAmelCase = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
snake_case_ = int(input())
snake_case_ = [0] * no_of_processes
snake_case_ = [0] * no_of_processes
snake_case_ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
snake_case_ , snake_case_ = map(int, input().split())
snake_case_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
snake_case_ = burst_time
snake_case_ = no_of_processes
snake_case_ = waiting_time
snake_case_ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
snake_case_ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 78 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = ConsistencyModelPipeline
lowercase_ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ : List[str] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet', )
return unet
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
return unet
def UpperCamelCase ( self, lowerCamelCase=False) -> Dict:
"""simple docstring"""
if class_cond:
_lowercase : Union[str, Any] = self.dummy_cond_unet
else:
_lowercase : Union[str, Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Tuple:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : str = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Dict = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : int = image[0, -3:, -3:, -1]
_lowercase : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Any = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Any = 0
_lowercase : List[str] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.get_dummy_components()
_lowercase : Optional[Any] = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : List[str] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Union[str, Any] = 1
_lowercase : Tuple = None
_lowercase : Tuple = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Dict = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Optional[Any] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Tuple = 1
_lowercase : int = None
_lowercase : Tuple = 0
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase=False, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
_lowercase : str = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_lowercase : Optional[Any] = self.get_fixed_latents(seed=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase, shape=lowerCamelCase)
_lowercase : Tuple = latents
return inputs
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Any:
"""simple docstring"""
if type(lowerCamelCase) == str:
_lowercase : Union[str, Any] = torch.device(lowerCamelCase)
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[str] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
return latents
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Any = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : Optional[Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs()
_lowercase : int = 1
_lowercase : Optional[Any] = None
_lowercase : str = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[int] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : int = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
_lowercase : int = 1
_lowercase : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : int = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 21 | 0 |
'''simple docstring'''
def __lowercase ( __lowercase , __lowercase = False ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_A = F'''Expected string as input, found {type(__lowercase )}'''
raise ValueError(__lowercase )
if not isinstance(__lowercase , __lowercase ):
_A = F'''Expected boolean as use_pascal parameter, found {type(__lowercase )}'''
raise ValueError(__lowercase )
_A = input_str.split("_" )
_A = 0 if use_pascal else 1
_A = words[start_index:]
_A = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[int, int]:
_lowercase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_lowercase : int = x_den * y_den * z_den
_lowercase : int = gcd(lowerCamelCase_ , lowerCamelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
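# Worked example for add_three above: summing 1/3 + 1/3 + 1/3 gives
# top = 1*3*3 + 1*3*3 + 1*3*3 = 27 and bottom = 3*3*3 = 27; dividing both by
# hcf = gcd(27, 27) = 27 reduces the result to (1, 1).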
def UpperCamelCase_( lowerCamelCase_ = 35 ) -> int:
_lowercase : set = set()
_lowercase : int
_lowercase : Fraction = Fraction(0 )
_lowercase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_lowercase : int = x_num * y_den + x_den * y_num
_lowercase : int = x_den * y_den
_lowercase : str = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : List[Any] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=2
_lowercase : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_lowercase : List[Any] = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : int = int(sqrt(lowerCamelCase_ ) )
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Optional[int] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-1
_lowercase : Any = x_num * y_num
_lowercase : str = x_den * y_num + x_num * y_den
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : int = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-2
_lowercase : str = x_num * x_num * y_num * y_num
_lowercase : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : List[str] = int(sqrt(lowerCamelCase_ ) )
_lowercase : Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Tuple = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
a__ : str = logging.get_logger(__name__)
class lowercase_ ( a__ ):
__UpperCAmelCase = 'vision-encoder-decoder'
__UpperCAmelCase = True
def __init__( self , **a ):
super().__init__(**a )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
UpperCamelCase__ = kwargs.pop("encoder" )
UpperCamelCase__ = encoder_config.pop("model_type" )
UpperCamelCase__ = kwargs.pop("decoder" )
UpperCamelCase__ = decoder_config.pop("model_type" )
UpperCamelCase__ = AutoConfig.for_model(a , **a )
UpperCamelCase__ = AutoConfig.for_model(a , **a )
UpperCamelCase__ = True
@classmethod
def __a ( cls , a , a , **a ):
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
UpperCamelCase__ = True
UpperCamelCase__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a )
def __a ( self ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.encoder.to_dict()
UpperCamelCase__ = self.decoder.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
class lowercase_ ( a__ ):
__UpperCAmelCase = version.parse('1.11' )
@property
def __a ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self ):
return 1e-4
@property
def __a ( self ):
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class lowercase_ ( a__ ):
@property
def __a ( self ):
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCamelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCamelCase__ = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def __a ( self , a , a = -1 , a = -1 , a = False , a = None , ):
import torch
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = super().generate_dummy_inputs(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
UpperCamelCase__ , UpperCamelCase__ = dummy_input["input_ids"].shape
UpperCamelCase__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase__ = dummy_input.pop("input_ids" )
UpperCamelCase__ = dummy_input.pop("attention_mask" )
UpperCamelCase__ = torch.zeros(a )
return common_inputs
class lowercase_ ( a__ ):
@property
def __a ( self ):
pass
def __a ( self , a ):
return VisionEncoderDecoderEncoderOnnxConfig(a )
def __a ( self , a , a , a = "default" ):
UpperCamelCase__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(a , a )
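# Illustrative composition sketch (model ids are assumptions; in upstream transformers
# the classmethod above is `from_encoder_decoder_configs`):
#     enc = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#     dec = AutoConfig.from_pretrained("gpt2")
#     cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     # the decoder config now has is_decoder=True and add_cross_attention=True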
| 80 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
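# Illustrative effect of the _LazyModule pattern above (assumed usage):
#     import transformers.models.llama as llama   # cheap: torch is not imported yet
#     model_cls = llama.LlamaModel                # first attribute access loads modeling_llama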
| 21 | 0 |
"""simple docstring"""
import argparse
lowerCamelCase_ : int = """docs/source/_static/js/custom.js"""
def _A ( lowercase ):
"""simple docstring"""
with open(lowercase , encoding='''utf-8''' , newline='''\n''' ) as f:
a =f.readlines()
a =0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
a =f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lowercase )
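# Worked example of the rewrite above (illustrative file contents): given a line
# 'const stableVersion = "v4.27.0"', calling update_custom_js("4.28.0") replaces it
# with 'const stableVersion = "v4.28.0"' and appends '    "v4.28.0": "v4.28.0",' to
# the last entry of versionMapping, just before its closing brace.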
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
lowerCamelCase_ : Optional[Any] = parser.parse_args()
update_custom_js(args.version)
| 81 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
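# Worked examples for the check above: [3, 4, 5] sorts to [3, 4, 5] and 5 < 3 + 4, so a
# polygon is possible (True); [1, 1, 3] fails because 3 >= 1 + 1 (False).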
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
from __future__ import annotations
import math
A__ = """2020.9.26"""
A__ = """xcodz-dot, cclaus, dhruvmanila"""
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
if not all(isinstance(snake_case , (float, int) ) for val in locals().values() ):
_lowerCAmelCase = F'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(snake_case )
_lowerCAmelCase = ((x * distance) / (z + distance)) * scale
_lowerCAmelCase = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
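# Worked example for the projection above: x=1, y=2, z=3, scale=10, distance=10 gives
# projected_x = (1 * 10) / (3 + 10) * 10 = 100 / 13 ≈ 7.692 and projected_y = 200 / 13 ≈ 15.385.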
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
if not isinstance(snake_case , snake_case ):
raise TypeError("""Axis must be a str""" )
_lowerCAmelCase = locals()
del input_variables["axis"]
if not all(isinstance(snake_case , (float, int) ) for val in input_variables.values() ):
_lowerCAmelCase = (
"""Input values except axis must either be float or int: """
F'{list(input_variables.values() )}'
)
raise TypeError(snake_case )
_lowerCAmelCase = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
_lowerCAmelCase = x * math.cos(snake_case ) - y * math.sin(snake_case )
_lowerCAmelCase = y * math.cos(snake_case ) + x * math.sin(snake_case )
_lowerCAmelCase = z
elif axis == "x":
_lowerCAmelCase = y * math.cos(snake_case ) - z * math.sin(snake_case )
_lowerCAmelCase = z * math.cos(snake_case ) + y * math.sin(snake_case )
_lowerCAmelCase = x
elif axis == "y":
_lowerCAmelCase = x * math.cos(snake_case ) - z * math.sin(snake_case )
_lowerCAmelCase = z * math.cos(snake_case ) + x * math.sin(snake_case )
_lowerCAmelCase = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }")
| 82 |
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCamelCase_( lowerCamelCase_ = 200_0000 ) -> int:
_lowercase : list[int] = [0]
_lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
_lowercase : int = 0
# an estimate of b, using the quadratic formula
_lowercase : float
# the largest integer less than b_estimate
_lowercase : int
# the largest integer less than b_estimate
_lowercase : int
# the triangle number corresponding to b_floor
_lowercase : int
# the triangle number corresponding to b_ceil
_lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_lowercase : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_lowercase : List[str] = floor(lowerCamelCase_ )
_lowercase : Dict = ceil(lowerCamelCase_ )
_lowercase : List[str] = triangle_numbers[b_floor]
_lowercase : List[str] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Union[str, Any] = triangle_b_first_guess * triangle_a
_lowercase : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Any = triangle_b_second_guess * triangle_a
_lowercase : Optional[Any] = idx_a * b_ceil
return area
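# Worked result (hand-checked, not from the source): for target=2_000_000 the closest
# product uses the triangle numbers T(36) = 666 and T(77) = 3003, since
# 666 * 3003 = 1_999_998, so the function returns area = 36 * 77 = 2772.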
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
snake_case_ : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def A__ ( UpperCAmelCase_ ):
for pegasus_name, hf_name in PATTERNS:
_UpperCamelCase : Optional[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return k
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase_ )
_UpperCamelCase : Any = PegasusConfig(**UpperCAmelCase_ )
_UpperCamelCase : int = PegasusForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = torch_model.model.state_dict()
_UpperCamelCase : Optional[int] = {}
for k, v in tf_weights.items():
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCamelCase : int = v.T
_UpperCamelCase : List[str] = torch.tensor(UpperCAmelCase_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCamelCase : List[Any] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
_UpperCamelCase : Any = mapping['shared.weight']
_UpperCamelCase : Optional[Any] = mapping['shared.weight']
_UpperCamelCase : str = {k: torch.zeros_like(UpperCAmelCase_ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : int = torch_model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def A__ ( UpperCAmelCase_="./ckpt/aeslc/model.ckpt-32000" ):
_UpperCamelCase : str = tf.train.list_variables(UpperCAmelCase_ )
_UpperCamelCase : int = {}
_UpperCamelCase : Optional[int] = ['Adafactor', 'global_step']
for name, shape in tqdm(UpperCAmelCase_ , desc='converting tf checkpoint to dict' ):
_UpperCamelCase : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCamelCase : List[str] = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = array
return tf_weights
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
# save tokenizer first
_UpperCamelCase : Optional[int] = Path(UpperCAmelCase_ ).parent.name
_UpperCamelCase : Dict = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
_UpperCamelCase : Tuple = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=UpperCAmelCase_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase_ )
# convert model
_UpperCamelCase : Optional[Any] = get_tf_weights_as_numpy(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCamelCase : Optional[int] = task_specific_params
_UpperCamelCase : int = convert_pegasus(UpperCAmelCase_ , UpperCAmelCase_ )
torch_model.save_pretrained(UpperCAmelCase_ )
_UpperCamelCase : List[str] = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(UpperCAmelCase_ , Path(UpperCAmelCase_ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
snake_case_ : Optional[int] = parser.parse_args()
if args.save_dir is None:
snake_case_ : List[str] = Path(args.tf_ckpt_path).parent.name
snake_case_ : Optional[int] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 83 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _lowerCamelCase:
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = np.abs((a - b)).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Any = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase)
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : Tuple = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : str = after_output[0]
_lowercase : Optional[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : List[Any] = 13
_lowercase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModelTester(self)
_lowercase : Any = FlaxBertModelTester(self)
_lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
_lowercase : Any = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : Tuple = 13
_lowercase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Any = random_attention_mask([batch_size, 4])
_lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = FlaxCLIPVisionModelTester(self)
_lowercase : Union[str, Any] = FlaxBertModelTester(self)
_lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
_lowercase : str = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
_lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_lowercase : List[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
| 21 | 0 |
"""simple docstring"""
def encrypt( input_string: str , key: int ) -> str:
    '''simple docstring'''
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["""""".join(row ) for row in temp_grid]
    output_string = """""".join(grid )
    return output_string
def decrypt( input_string: str , key: int ) -> str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("""*""" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string: str ) -> dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
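# A short round-trip sketch (assumes the function names above): decrypting with
# the same key inverts encryption, and bruteforce recovers the plaintext at the
# true key.
def _demo_rail_fence() -> None:
    message, key = "HELLO WORLD", 4
    cipher = encrypt(message, key)
    assert decrypt(cipher, key) == message
    assert bruteforce(cipher)[key] == message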
| 84 |
import random
from typing import Any
def fisher_yates_shuffle( data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 21 | 0 |
'''simple docstring'''
def pancake_sort( arr: list ):
    '''simple docstring'''
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the whole unsorted prefix so the maximum lands at index cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
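# A short worked example (assumes the fixed pancake_sort above): with
# [3, 1, 2] the first round flips the whole prefix to [2, 1, 3], and the
# second round flips the two-element prefix to give [1, 2, 3].
assert pancake_sort([3, 1, 2]) == [1, 2, 3]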
| 85 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'width_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ : Dict = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ : List[Any] = False
lowercase_ : Optional[int] = False
lowercase_ : List[Any] = False
lowercase_ : Tuple = False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = MobileViTVaModelTester(self)
_lowercase : Tuple = MobileViTVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(lowerCamelCase)
_lowercase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : List[Any] = outputs.hidden_states
_lowercase : Tuple = 5
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase : Optional[int] = 2
for i in range(len(lowerCamelCase)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2)
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Tuple = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = MobileViTVaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> Dict:
_lowercase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
lowerCamelCase)
_lowercase : Dict = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**lowerCamelCase)
# verify the logits
_lowercase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Optional[int] = model.to(lowerCamelCase)
_lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Tuple = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**lowerCamelCase)
_lowercase : str = outputs.logits
# verify the logits
_lowercase : Tuple = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Tuple = model.to(lowerCamelCase)
_lowercase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : int = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Union[str, Any] = model(**lowerCamelCase)
_lowercase : Any = outputs.logits.detach().cpu()
_lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase, target_sizes=[(50, 60)])
_lowercase : Any = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
_lowercase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase)
_lowercase : Optional[int] = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
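# Sketch of the stage-wise shapes asserted in check_hidden_states_output above:
# the five reported hidden states halve the spatial size at every stage, so with
# image_size = 64 the feature maps are 32, 16, 8, 4 and 2 pixels on a side.
# After the loop the divisor is 64, and output_stride == divisor // 2 == 32.
# expected_sizes = [64 // (2 ** (i + 1)) for i in range(5)]  # -> [32, 16, 8, 4, 2]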
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix , constant_matrix , init_val , iterations ):
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = F"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table ):
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
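# A minimal usage sketch (assumes the fixed names above). For the strictly
# diagonally dominant system 4x + y = 1, x + 3y = 2 with x0 = [0.5, -0.5]:
# coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
# constant = np.array([[1.0], [2.0]])
# jacobi_iteration_method(coefficient, constant, [0.5, -0.5], iterations=25)
# # converges toward the exact solution [1/11, 7/11] ~ [0.0909, 0.6364]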
# Test Cases
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 86 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE : str = "bart"
SCREAMING_SNAKE_CASE : Optional[int] = True
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> int:
if LOAD_DENSE_INDEX:
_lowercase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_lowercase : Union[str, Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_lowercase : str = qar_model.eval()
else:
_lowercase , _lowercase : Any = (None, None)
if MODEL_TYPE == "bart":
_lowercase : Dict = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
_lowercase : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_lowercase : Any = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_lowercase : List[Any] = sas_model.eval()
else:
_lowercase , _lowercase : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> str:
if LOAD_DENSE_INDEX:
_lowercase : Optional[Any] = faiss.StandardGpuResources()
_lowercase : Optional[int] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_lowercase : Tuple = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
_lowercase : Any = faiss.IndexFlatIP(128 )
_lowercase : Union[str, Any] = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ )
wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU
else:
_lowercase , _lowercase : Any = (None, None)
_lowercase : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> Any:
_lowercase : List[str] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_lowercase : Optional[Any] = elia['train_eli5']
_lowercase : Tuple = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
_lowercase : Union[str, Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCamelCase_ )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = load_indexes()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = load_models()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = load_train_data()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=10 ) -> List[str]:
_lowercase : Any = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ )
_lowercase , _lowercase : List[str] = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = [elia_train[int(lowerCamelCase_ )] for i in I[0]]
return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Dict = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCamelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None),
} )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_=256 , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=0.95 , lowerCamelCase_=0.8 ) -> Dict:
with torch.no_grad():
_lowercase : str = qa_sas_generate(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
SCREAMING_SNAKE_CASE : List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : str = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Optional[int] = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10]
SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE : str = find_nearest_training(question)
SCREAMING_SNAKE_CASE : Any = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
SCREAMING_SNAKE_CASE : str = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
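# Standalone sketch (not part of the app above) of the "mixed" retrieval merge
# used when index_type == "mixed": interleave dense and sparse hits while
# deduplicating on the full result tuple, then keep the top `limit` entries.
def merge_support_lists(dense_hits, sparse_hits, limit=10):
    merged = []
    for res_d, res_s in zip(dense_hits, sparse_hits):
        if tuple(res_d) not in merged:
            merged.append(tuple(res_d))
        if tuple(res_s) not in merged:
            merged.append(tuple(res_s))
    return merged[:limit]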
| 21 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir , "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
})
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"] , key), getattr(expected_dataset_infos["default"] , key)
        if key == "num_bytes":
            assert is_apercent_close(result , expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes)
        else:
            assert result == expected
| 87 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class _lowerCamelCase:
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
_lowercase : Dict = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
_lowercase : Union[str, Any] = titles if not isinstance(lowerCamelCase, lowerCamelCase) else [titles]
_lowercase : Tuple = texts if not isinstance(lowerCamelCase, lowerCamelCase) else [texts]
_lowercase : Optional[Any] = len(lowerCamelCase)
_lowercase : Any = questions if not isinstance(lowerCamelCase, lowerCamelCase) else [questions] * n_passages
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCamelCase)} titles and {len(lowerCamelCase)} texts.''')
_lowercase : Any = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : Tuple = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : int = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase)
]
}
if return_attention_mask is not False:
_lowercase : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase : Union[str, Any] = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = 64, lowerCamelCase = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : Union[str, Any] = reader_input['input_ids']
_lowercase , _lowercase , _lowercase : Tuple = reader_output[:3]
_lowercase : Tuple = len(lowerCamelCase)
_lowercase : str = sorted(range(lowerCamelCase), reverse=lowerCamelCase, key=relevance_logits.__getitem__)
_lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowercase : str = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase : Any = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase : List[Any] = sequence_ids.index(self.pad_token_id)
else:
_lowercase : List[str] = len(lowerCamelCase)
_lowercase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(lowerCamelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        _lowercase : Dict = sorted(lowerCamelCase, key=lambda x: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
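# A minimal standalone sketch (not part of the DPR API above) of the span
# selection idea in `_get_best_spans`: score every candidate span by
# start_logit + end_logit, sort descending, then greedily keep spans that are
# not nested in (and do not nest) an already-chosen span. The logit values
# below are made up for illustration.
def _toy_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # nested in (or nesting) a better span already kept
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen
# The best span is (2, 3); (0, 2) is kept next because the check above only
# rejects nesting, not partial overlap -- exactly like the method above.
assert _toy_best_spans([0.1, 0.0, 2.0, 0.3], [0.2, 0.1, 0.5, 1.5]) == [(2, 3), (0, 2)]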
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : Dict = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Dict = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
__lowerCAmelCase : Optional[int] = '▁'
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["""input_ids""", """attention_mask"""]
a__ = BarthezTokenizer
def __init__( self : Any , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : int="<unk>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : Tuple="<mask>" , **UpperCamelCase__ : List[Any] , ) -> List[Any]:
"""simple docstring"""
__magic_name__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
__magic_name__ = vocab_file
__magic_name__ = False if not self.vocab_file else True
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
__magic_name__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
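# Layout sketch for the two methods above (ids 0 = <s>/cls and 2 = </s>/sep are
# illustrative assumptions, not read from the real BARThez vocabulary):
# single sequence: [0] + ids_a + [2]                  ->  <s> A </s>
# sequence pair:   [0] + ids_a + [2, 2] + ids_b + [2] ->  <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros for both layouts.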
| 88 |
def max_subarray_product( numbers ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
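# Short worked examples (assume max_subarray_product as defined above): for
# [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6; for [-2, -3, 4]
# the whole array gives (-2) * (-3) * 4 = 24.
assert max_subarray_product([2, 3, -2, 4]) == 6
assert max_subarray_product([-2, -3, 4]) == 24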
| 21 | 0 |
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
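# Usage sketch (assumes the fixed names above):
# logical_left_shift(17, 2)      -> '0b1000100'  (17 << 2 == 68)
# logical_right_shift(17, 2)     -> '0b100'      (17 >> 2 == 4)
# arithmetic_right_shift(-17, 2) -> '0b111011'   (sign bit replicated; -17 >> 2 == -5)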
| 89 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self, ints: Iterable[int]) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__( self) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)
    def __str__( self) -> str:
        """simple docstring"""
        return " -> ".join([str(node) for node in self])
def merge_lists( sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
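# merge_lists above re-sorts the concatenation, costing O((n + m) log(n + m)).
# A sketch of the linear-time two-pointer merge that exploits the inputs
# already being sorted (assumes the classes above):
def merge_sorted_iters(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> list[int]:
    out: list[int] = []
    it_one, it_two = iter(sll_one), iter(sll_two)
    a, b = next(it_one, None), next(it_two, None)
    while a is not None and b is not None:
        if a <= b:
            out.append(a)
            a = next(it_one, None)
        else:
            out.append(b)
            b = next(it_two, None)
    while a is not None:  # drain the remaining side
        out.append(a)
        a = next(it_one, None)
    while b is not None:
        out.append(b)
        b = next(it_two, None)
    return out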
| 21 | 0 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """Evaluate the gamma function for positive integers and half-integers."""
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)  # gamma(1/2) = sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """Spot-check known values of the gamma function."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
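    # Worked example (a sketch): the half-integer recursion unwinds to
    # gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) (about 3.3234).
    assert abs(gamma(3.5) - 2.5 * 1.5 * 0.5 * sqrt(pi)) < 1e-12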
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f'''gamma({num}) = {gamma(num)}''')
print("\nEnter 0 to exit...")
| 90 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Any = KandinskyImgaImgPipeline
lowercase_ : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
lowercase_ : Any = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
lowercase_ : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ : Union[str, Any] = False
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=10_05, )
_lowercase : Optional[int] = MultilingualCLIP(lowerCamelCase)
_lowercase : List[str] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Union[str, Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Optional[Any] = UNetaDConditionModel(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.dummy_text_encoder
_lowercase : List[Any] = self.dummy_tokenizer
_lowercase : int = self.dummy_unet
_lowercase : int = self.dummy_movq
_lowercase : Optional[int] = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : List[Any] = DDIMScheduler(**lowerCamelCase)
_lowercase : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict:
"""simple docstring"""
_lowercase : List[str] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(lowerCamelCase)
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[int] = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Tuple = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB').resize((2_56, 2_56))
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[str] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = 'cpu'
_lowercase : Tuple = self.get_dummy_components()
_lowercase : str = self.pipeline_class(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : Optional[int] = output.images
_lowercase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase), return_dict=lowerCamelCase, )[0]
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : Tuple = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy')
_lowercase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_lowercase : Optional[int] = 'A red cartoon frog, 4k'
_lowercase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase)
_lowercase : Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1', torch_dtype=torch.floataa)
_lowercase : List[Any] = pipeline.to(lowerCamelCase)
pipeline.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = torch.Generator(device='cpu').manual_seed(0)
_lowercase , _lowercase : List[Any] = pipe_prior(
lowerCamelCase, generator=lowerCamelCase, num_inference_steps=5, negative_prompt='', ).to_tuple()
_lowercase : Union[str, Any] = pipeline(
lowerCamelCase, image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='np', )
_lowercase : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
| 21 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
'''simple docstring'''
__UpperCamelCase = "openai/whisper-base"
__UpperCamelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase = "transcriber"
__UpperCamelCase = WhisperProcessor
__UpperCamelCase = WhisperForConditionalGeneration
__UpperCamelCase = ["audio"]
__UpperCamelCase = ["text"]
    def encode(self, audio):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors='''pt''').input_features
    def forward(self, inputs):
        """Generate token ids from the encoded features."""
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        """Decode the generated ids back into text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
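# Hypothetical usage sketch (the audio path is a placeholder; model weights
# download on first use):
#   tool = SpeechToTextTool()
#   print(tool("sample.wav"))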
| 91 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 21 | 0 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
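    # Worked example (a sketch): with conductivity and electron concentration
    # known, the remaining quantity is solved for.
    name, value = electric_conductivity(25, 100, 0)
    assert name == "mobility" and value == 25 / (100 * ELECTRON_CHARGE)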
| 92 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
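    # Worked example (a sketch): sum_of_series(1, 1, 10) = (10 / 2) * (2 + 9) = 55.0
    assert sum_of_series(1, 1, 10) == 55.0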
| 21 | 0 |
'''simple docstring'''
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create a size x size canvas of dead (False) cells."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of the canvas to alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply Conway's rules to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Decide the next state of one cell from its 3x3 neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
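# Quick sanity check (a sketch): a live cell with exactly two live neighbours
# stays alive under Conway's rules.
assert __judge_point(True, [[True, False, False], [False, True, True], [False, False, False]])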
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_lowercase : Tuple = int(sys.argv[1])
# main working structure of this module.
_lowercase : int = create_canvas(canvas_size)
seed(c)
_lowercase , _lowercase : List[str] = plt.subplots()
fig.show()
_lowercase : str = ListedColormap(["w", "k"])
try:
while True:
_lowercase : str = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 93 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=64, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=1, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Any = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : str = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Any = num_choices
_lowercase : Tuple = scope
_lowercase : Optional[Any] = q_groups
_lowercase : List[str] = k_groups
_lowercase : Optional[int] = v_groups
_lowercase : List[str] = post_attention_groups
_lowercase : Union[str, Any] = intermediate_groups
_lowercase : int = output_groups
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : Dict = None
_lowercase : int = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = SqueezeBertModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Dict = SqueezeBertForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : int = SqueezeBertForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : List[str] = SqueezeBertForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = self.num_choices
_lowercase : str = SqueezeBertForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Dict = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase_ : Optional[int] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : List[str] = True
lowercase_ : int = False
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = SqueezeBertModelTester(self)
_lowercase : Dict = ConfigTester(self, config_class=lowerCamelCase, dim=37)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = SqueezeBertModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
_lowercase : Optional[int] = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
_lowercase : List[str] = model(lowerCamelCase)[0]
_lowercase : Union[str, Any] = torch.Size((1, 3))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Tuple = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-4))
| 21 | 0 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            return index
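# Worked example (a sketch): 144 = F(12) is the first Fibonacci term with
# three digits, so solution(3) should be 12.
assert solution(3) == 12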
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 94 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """Deprecated alias of CLIPImageProcessor."""
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 95 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase_ : int = """ssube/stable-diffusion-x4-upscaler-onnx"""
def UpperCamelCase ( self, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(lowerCamelCase))
_lowercase : Union[str, Any] = torch.manual_seed(lowerCamelCase)
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : str = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[Any] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs()
_lowercase : List[str] = pipe(**lowerCamelCase).images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
# using the PNDM scheduler by default
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np', )
_lowercase : List[Any] = output.images
_lowercase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : List[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
_lowercase : str = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCamelCase, output_type='np', )
_lowercase : str = output.images
_lowercase : str = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 21 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    args = Namespace(**checkpoint['cfg']['model'])
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    state_dict = {key.replace('decoder', 'model'): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='gelu', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing_keys = model.load_state_dict(state_dict, strict=False)
    print(missing_keys)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
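# Hypothetical invocation of this script (the file name and paths are
# placeholders):
#   python convert_xglm_checkpoint.py /path/to/model.pt /path/to/dump_dir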
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 96 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = 1
_lowercase : Any = 3
_lowercase : Tuple = (32, 32)
_lowercase : Tuple = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCamelCase)
return image
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
return model
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
return model
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=50_06, )
return RobertaSeriesModelWithTransformation(lowerCamelCase)
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
def extract(*lowerCamelCase, **lowerCamelCase):
class _lowerCamelCase:
def __init__( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = torch.ones([0])
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
self.pixel_values.to(lowerCamelCase)
return self
return Out()
return extract
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : Optional[Any] = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Tuple = 77
_lowercase : int = self.dummy_image.to(lowerCamelCase)
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Optional[int] = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : Dict = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Any = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, )
_lowercase : Optional[int] = output.images
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Optional[Any] = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, return_dict=lowerCamelCase, )[0]
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
_lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : str = self.dummy_vae
_lowercase : Optional[Any] = self.dummy_text_encoder
_lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Optional[Any] = 77
_lowercase : str = self.dummy_image.to(lowerCamelCase)
# put models in fp16
_lowercase : List[str] = unet.half()
_lowercase : List[Any] = vae.half()
_lowercase : Any = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Any = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = 'A painting of a squirrel eating a burger'
_lowercase : Optional[Any] = torch.manual_seed(0)
_lowercase : Union[str, Any] = alt_pipe(
[prompt], generator=lowerCamelCase, num_inference_steps=2, output_type='np', image=lowerCamelCase, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : str = init_image.resize((7_60, 5_04))
_lowercase : Optional[int] = 'BAAI/AltDiffusion'
_lowercase : str = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : List[str] = 'A fantasy landscape, trending on artstation'
_lowercase : Any = torch.manual_seed(0)
_lowercase : Dict = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : List[str] = output.images[0]
_lowercase : Tuple = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowercase : Optional[Any] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : str = init_image.resize((7_68, 5_12))
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
_lowercase : str = 'BAAI/AltDiffusion'
_lowercase : Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : int = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : int = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : Union[str, Any] = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
| 21 | 0 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
"""simple docstring"""
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = 'ml.p3.2xlarge'
    iam_role_name = 'accelerate_sagemaker_execution_role'
    profile = 'hf-sm'
    region = 'us-east-1'
    num_machines = 1
    base_job_name = 'accelerate-sagemaker-1'
    pytorch_version = '1.6'
    transformers_version = '4.4'
    training_script = 'train.py'
    success_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
    fail_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
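# The two argument lists above exercise `_convert_nargs_to_dict`: the "success"
# list pairs every flag with a value that can be coerced to a concrete type
# (str, bool, int, float), while the "fail" list puts two flags back to back
# ("--do_train", "--do_test") so the parser cannot tell flags from values and
# is expected to raise.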
class SageMakerLaunch( unittest.TestCase ):
"""simple docstring"""
    def test_args_convert( self ):
'''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=3_00, max_position_embeddings=10_24, encoder_layers=6, encoder_ffn_dim=10_24, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=10_24, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_56, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=3_00, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.2_5, disable_custom_kernels=False, **kwargs, ) -> None:
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads( self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict( self) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
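# Illustrative usage (not part of the original module): the config enforces the
# two-stage/box-refine coupling checked in `__init__`, and the `attribute_map`
# above lets the generic config names resolve to the DETR-specific ones, e.g.:
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == config.encoder_attention_heads
#   assert config.hidden_size == config.d_model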
| 21 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker :
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
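# How Tracker works: calling it on an input registers a forward hook on every
# submodule, runs one forward pass, and records each leaf module (one with no
# children, or a Conv/BatchNorm) in the order it executed. `parametrized` then
# filters the trace down to the modules that actually own weights.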
@dataclass
class ModuleTransfer :
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list )
    dest_skip: list = field(default_factory=list )
    def __call__( self , x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
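# ModuleTransfer copies weights positionally: both models are traced with the
# same dummy input, and the i-th parametrized op of `src` is state_dict-loaded
# into the i-th op of `dest`. This only works when the two architectures run
# the same learnable ops in the same order, which the length check enforces.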
def convert_weight_and_push( name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
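# Typical invocation (illustrative; the script filename is hypothetical, the
# flags match the argparse definition below):
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted
#
# Omitting --model_name converts every architecture listed in `names_to_config`.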
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 98 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[str] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
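# This `__init__` follows the standard transformers lazy-import pattern: the
# `_import_structure` dict only names the public symbols per submodule, and
# `_LazyModule` defers the actual imports until an attribute is first accessed,
# so importing the package stays cheap regardless of which optional backends
# (sentencepiece, speech, TF, PyTorch) are installed.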
| 21 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args):
        '''simple docstring'''
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask)
        model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args):
        '''simple docstring'''
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
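# CTRLModelTester is the standard transformers "model tester" helper: it builds
# tiny random configs and inputs and exposes create_and_check_* methods that the
# ModelTesterMixin-based test class below drives, so the same shape checks run
# for every model class declared in `all_model_classes`.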
@require_torch
class CTRLModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37)
    def tearDown( self):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_ctrl_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_parallelism( self):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained('ctrl')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device)  # Legal the president is
        expected_output_ids = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False)
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids)
| 99 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 2_55.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
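# preprocess() normalizes any accepted input type to a single float tensor in
# NCHW layout with values rescaled from [0, 255] to [-1, 1], which is the value
# range the VAE encoder expects.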
def slerp( t , v0 , v1 , DOT_THRESHOLD=0.99_95 ):
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
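# slerp() implements spherical linear interpolation:
#   v2 = sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1
# where theta is the angle between v0 and v1. When the vectors are nearly
# parallel (|dot| > DOT_THRESHOLD) it falls back to plain linear interpolation
# to avoid dividing by a vanishing sin(theta).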
def spherical_dist_loss( x , y ):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
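# spherical_dist_loss() measures the geodesic (great-circle) separation between
# the two embeddings after projecting them onto the unit sphere: for unit
# vectors, arcsin(|x - y| / 2) equals half the angle theta between them, so the
# returned value works out to theta^2 / 2.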
def set_requires_grad( model , value ):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
    def __init__( self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size['shortest_edge']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
    def disable_attention_slicing( self):
        """simple docstring"""
        self.enable_attention_slicing(None)
    def freeze_vae( self):
        """simple docstring"""
        set_requires_grad(self.vae, False)
    def unfreeze_vae( self):
        """simple docstring"""
        set_requires_grad(self.vae, True)
    def freeze_unet( self):
        """simple docstring"""
        set_requires_grad(self.unet, False)
    def unfreeze_unet( self):
        """simple docstring"""
        set_requires_grad(self.unet, True)
    def get_timesteps( self, num_inference_steps, strength, device):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
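    # get_timesteps() performs the usual img2img schedule truncation: with
    # strength s and N inference steps, the first (1 - s) * N timesteps are
    # skipped, so s = 1.0 denoises from pure noise while small s stays close
    # to the input latents.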
    def prepare_latents( self, image, timestep, batch_size, dtype, device, generator=None):
        """simple docstring"""
        if not isinstance(image, torch.Tensor):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(image)}''')
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.1_8_2_1_5 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description( self, image):
        """simple docstring"""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
    def get_clip_image_embeddings( self, image, batch_size):
        """simple docstring"""
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
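    # The CLIP image embeddings are L2-normalized here so that
    # spherical_dist_loss, which assumes unit-norm inputs, measures a pure
    # angular distance between the generated image and the target embedding.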
    @torch.enable_grad()
    def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ):
        """simple docstring"""
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.1_8_2_1_5 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
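    # cond_fn() is classifier-style CLIP guidance: it decodes the scheduler's
    # current estimate of x_0, embeds it with CLIP, and takes the gradient of
    # the spherical distance to the target embedding with respect to the
    # latents. For DDIM/PNDM-style schedulers the gradient is folded into the
    # noise prediction; for LMS it nudges the latents directly.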
    @torch.no_grad()
    def __call__( self, style_image, content_image, style_prompt = None, content_prompt = None, height = 5_12, width = 5_12, noise_strength = 0.6, num_inference_steps = 50, guidance_scale = 7.5, batch_size = 1, eta = 0.0, clip_guidance_scale = 1_00, generator = None, output_type = "pil", return_dict = True, slerp_latent_style_strength = 0.8, slerp_prompt_style_strength = 0.1, slerp_clip_image_style_strength = 0.1, ):
        """simple docstring"""
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(generator)} generators.''')
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ', '.join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs['offset'] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([''], padding='max_length', max_length=max_length, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        # check if the scheduler accepts generator
        accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs['generator'] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 21 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
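# These dataclasses are consumed by HfArgumentParser below: each field becomes a
# --flag on the command line (e.g. --model_name_or_path, --task_name), with the
# field's type used for casting and its `help` metadata used in the usage text.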
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(UpperCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , UpperCamelCase_ , UpperCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 100 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests( PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def dummy_uncond_unet( self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet', )
        return unet
    @property
    def dummy_cond_unet( self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
        return unet
    def get_dummy_components( self, class_cond=False):
        """simple docstring"""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
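    # Note the sampler contract used throughout these tests: passing
    # num_inference_steps=None together with an explicit `timesteps` list (here
    # [22, 0]) runs the consistency model in multistep mode on exactly those
    # timesteps, while num_inference_steps=1 with timesteps=None exercises
    # single-step generation.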
    def test_consistency_model_pipeline_multistep( self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond( self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep( self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep_class_cond( self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests( unittest.TestCase ):
    def tearDown( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents
        return inputs
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Any:
"""simple docstring"""
if type(lowerCamelCase) == str:
_lowercase : Union[str, Any] = torch.device(lowerCamelCase)
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[str] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
return latents
    def test_consistency_model_cd_multistep( self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    def test_consistency_model_cd_onestep( self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
    def test_consistency_model_cd_multistep_flash_attn( self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : int = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
_lowercase : int = 1
_lowercase : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : int = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
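    # Aside (hedged sketch, not from the original file): `sdp_kernel` used above
    # is torch 2.x's context manager for pinning the scaled-dot-product-attention
    # backend. In isolation, assuming a CUDA build of torch>=2.0:
    #
    #     import torch
    #     from torch.backends.cuda import sdp_kernel
    #     q = k = v = torch.randn(1, 8, 128, 64, device="cuda", dtype=torch.float16)
    #     with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    #         out = torch.nn.functional.scaled_dot_product_attention(q, k, v)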
| 21 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A__ ( self ,A__):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def A__ ( self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) ,[9, 6, 7, 1_2, 1_0, 1_1])
def A__ ( self):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens ,rust_tokens)
        ids = tokenizer.encode(sequence ,add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False)
        self.assertListEqual(ids ,rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids ,rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens ,rust_tokens)
        ids = tokenizer.encode(sequence ,add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False)
        self.assertListEqual(ids ,rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids ,rust_ids)
def A__ ( self):
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''h\u00E9llo'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False ,never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def A__ ( self):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab ,unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') ,[])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') ,['''[UNK]''', '''runn''', '''##ing'''])
def A__ ( self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def A__ ( self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def A__ ( self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def A__ ( self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
@slow
def A__ ( self):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''')
        text = tokenizer.encode('''sequence builders''' ,add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def A__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence ,return_attention_mask=False ,return_token_type_ids=False ,return_offsets_mapping=True ,add_special_tokens=True ,)
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r ,'''do_lower_case''') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''])
def A__ ( self):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p ,list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r ,list_of_commun_chinese_char)
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p ,expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r ,expected_tokens)
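# Aside (illustrative sketch, not part of the test file above): the
# WordpieceTokenizer being asserted implements greedy longest-match-first
# subword splitting. A minimal reimplementation of that idea, assuming the
# same "##" continuation prefix and the per-word [UNK] fallback:
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur_piece = None
        # shrink the window from the right until a vocab entry matches
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk]  # the whole word degrades to [UNK], as in the test above
        pieces.append(cur_piece)
        start = end
    return pieces
# e.g. greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]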
| 101 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
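# Aside (hedged note): Fraction(num, den) already reduces by gcd internally;
# the explicit gcd division above still matters because the raw (num, den)
# tuples are what get deduplicated in `unique_s`, so they must be in lowest
# terms before being added to the set. A quick check of that invariant:
#
#     from fractions import Fraction
#     from math import gcd
#     num, den = 6, 8
#     h = gcd(num, den)
#     assert (num // h, den // h) == (Fraction(num, den).numerator, Fraction(num, den).denominator)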
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    '''simple docstring'''

    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list )
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(model )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
    @slow
    def test_inference(self ):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        input_ids = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
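    # Aside (illustrative, not from the original file): the check above is the
    # standard "golden slice" integration test; run a frozen checkpoint on
    # fixed ids and pin a tiny, stable corner of the output tensor. In plain
    # numpy terms the assertion reduces to:
    #
    #     max_abs_diff = np.max(np.abs(output_slice - expected_slice))
    #     assert max_abs_diff < 1e-4
    #
    # where expected_slice was recorded once from a trusted run of the model.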
| 102 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
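# Aside (hedged sketch): the sys.modules swap above is the standard lazy-import
# trick: replace this module with a proxy that imports each submodule only on
# first attribute access. The core of the idea, stripped down:
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module("." + submodule, self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)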
| 21 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    require_version(deps[pkg] , hint )
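# Aside (hedged sketch, names are illustrative): a require_version-style check
# is just "look up the installed distribution's version and test it against a
# PEP 440 specifier". A minimal equivalent using the stdlib plus `packaging`:
#
#     from importlib.metadata import version
#     from packaging.requirements import Requirement
#
#     def check(req_str):               # e.g. "tqdm>=4.27"
#         req = Requirement(req_str)
#         return version(req.name) in req.specifier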
| 103 |
from __future__ import annotations
def check_polygon(nums: list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in nums ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
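# Aside: the predicate above is the generalized triangle inequality; side
# lengths s_1 <= ... <= s_n close into a polygon iff s_n < s_1 + ... + s_(n-1).
# Quick sanity checks:
#
#     assert check_polygon([3, 4, 5])          # a valid triangle
#     assert not check_polygon([1, 1, 3])      # the longest side can't be bridged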
| 21 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline ):
    """simple docstring"""

    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: DDIMScheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
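    # Aside (hedged note): the `accepts_eta` inspection above is the generic
    # way to forward an optional kwarg only to schedulers whose .step()
    # actually declares it (DDIM takes `eta`, many schedulers do not):
    #
    #     import inspect
    #     accepted = set(inspect.signature(scheduler.step).parameters)
    #     step_kwargs = {"eta": 0.0} if "eta" in accepted else {}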
| 104 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
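# Aside (worked example): the search above exploits the identity that an
# a x b grid contains T(a) * T(b) sub-rectangles, where T(n) = n*(n+1)/2 is the
# n-th triangle number; e.g. the 2 x 3 grid yields T(2) * T(3) = 3 * 6 = 18.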
| 21 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
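    # Aside (hedged note): unlike BERT, Funnel assigns the [CLS] token its own
    # segment id (cls_token_type_id = 2 above), so a pair of sequences with
    # len(a) = 3 and len(b) = 2 produces token type ids
    # [2, 0, 0, 0, 0, 1, 1, 1]: one 2 for [CLS], zeros for sequence A plus its
    # [SEP], ones for sequence B plus its [SEP].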
| 105 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class _lowerCamelCase:
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
    def assert_almost_equals( self, a, b, tol):
        """simple docstring"""
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Any = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load( self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : List[Any] = 13
_lowercase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModelTester(self)
_lowercase : Any = FlaxBertModelTester(self)
_lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
_lowercase : Any = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : Tuple = 13
_lowercase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Any = random_attention_mask([batch_size, 4])
_lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = FlaxCLIPVisionModelTester(self)
_lowercase : Union[str, Any] = FlaxBertModelTester(self)
_lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
_lowercase : str = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
_lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_lowercase : List[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
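# Aside (illustrative, not from the original file): every PT<->Flax equivalence
# check above follows one recipe: convert the weights, run both frameworks on
# identical inputs, and bound the elementwise drift. Stripped to its core:
#
#     fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#     fx_out = fx_model(**inputs, params=fx_state)
#     with torch.no_grad():
#         pt_out = pt_model(**{k: torch.tensor(v.tolist()) for k, v in inputs.items()})
#     assert np.abs(np.array(fx_out[0]) - pt_out[0].numpy()).max() < 4e-2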
| 21 | 0 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""

    def __init__( self , k: float , window_size: int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__( self ):
        return str(self.k )
    def detect( self , img_path: str ):
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('''path_to_image''')
    cv2.imwrite('''detect.png''', color_img)
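# Aside (worked note): the response computed above is the Harris measure
# R = det(M) - k * trace(M)^2 over the 2x2 structure tensor
# M = [[Wxx, Wxy], [Wxy, Wyy]]. With eigenvalues l1, l2 of M we have
# det(M) = l1 * l2 and trace(M) = l1 + l2, so R is large only when both
# eigenvalues are large, i.e. gradient energy in two directions: a corner.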
| 106 |
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b] , data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
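# Aside (hedged): the function above swaps random *pairs*, which mixes the list
# but is not the textbook Fisher-Yates. The classic in-place variant walks the
# list backwards, swapping each slot with a not-yet-fixed index, and yields a
# uniformly random permutation in O(n):
def fisher_yates_classic(data: list ) -> list[Any]:
    for i in range(len(data ) - 1 , 0 , -1 ):
        j = random.randint(0 , i )
        data[i] , data[j] = data[j] , data[i]
    return data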
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
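# Aside (hedged sketch): each try/except block above is a capability probe;
# a submodule is registered only when its optional backend imports cleanly.
# The same probe in miniature:
#
#     import importlib.util
#
#     def soft_available(name):
#         return importlib.util.find_spec(name) is not None
#
#     # if soft_available("tokenizers"): _import_structure["tokenization_..."] = [...]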
| 107 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self) -> Any:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.0_2, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.2_5, ffn_dropout=0.0, attn_dropout=0.0, ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(5_12 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self) -> List[str]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self) -> Union[str, Any]:
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels) -> Any:
        """simple docstring"""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels) -> int:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> Any:
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self) -> Optional[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self) -> Optional[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self) -> List[Any]:
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self) -> int:
        """simple docstring"""
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self) -> List[Any]:
        """simple docstring"""
        pass
    def test_forward_signature(self) -> Tuple:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self) -> List[str]:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self) -> List[str]:
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self) -> Tuple:
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1E-4))
    @slow
    def test_inference_semantic_segmentation(self) -> Optional[Any]:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
    @slow
    def test_post_processing_semantic_segmentation(self) -> str:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """simple docstring"""
    data: int
    next_node: Node | None
class SortedLinkedList:
    """simple docstring"""
    def __init__( self , ints: Iterable[int] ):
        """simple docstring"""
        self.head: Node | None = None
        # insert in descending order at the head so the list ends up ascending
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self ):
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self ):
        """simple docstring"""
        return sum(1 for _ in self )
    def __str__( self ):
        """simple docstring"""
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one: SortedLinkedList , sll_two: SortedLinkedList ):
    '''simple docstring'''
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
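# Note: `merge_lists` re-sorts the concatenation, costing O((n+m) log(n+m)).
# Since both inputs are already sorted, a streaming merge suffices -- a minimal
# sketch (our addition; `merge_sorted_values` is not part of the source):
def merge_sorted_values(one: SortedLinkedList, two: SortedLinkedList) -> list[int]:
    from heapq import merge
    return list(merge(one, two))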
| 108 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE : str = "bart"
SCREAMING_SNAKE_CASE : Optional[int] = True
@st.cache(allow_output_mutation=lowerCamelCase_ )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
        wikiaab_index_flat = faiss.IndexFlatIP(128 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    elia = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    elia_train = elia['train_eli5']
    elia_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Dict = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda tensor : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda tokenizer : None),
    } )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='cuda:0' , )[0]
    # returns the module-level `support_list` populated by the main block below
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10]
SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 21 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000 , n_limit: int = 10 ) -> int:
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self, questions, titles = None, texts = None, padding = False, truncation = False, max_length = None, return_tensors = None, return_attention_mask = None, **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F'''There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.''')
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans( self, reader_input, reader_output, num_spans = 16, max_answer_length = 64, num_spans_per_passage = 4, ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self, start_logits, end_logits, max_answer_length, top_spans, ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
| 21 | 0 |
import os
from math import log10
def solution(data_file: str = "base_exp.txt" ) -> int:
    """simple docstring"""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
    print(solution())
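# Why logarithms: comparing b1**e1 with b2**e2 directly would materialise huge
# integers, but log10(b**e) == e * log10(b), so e * log10(b) ranks the pairs
# with cheap floats. A tiny worked check (our addition):
#   2**10 = 1024 and 10**3 = 1000, and indeed
#   10 * log10(2) ~= 3.0103  >  3 * log10(10) == 3.0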
| 110 |
def max_product_subarray(numbers: list ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
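# Quick usage check (our addition): the maximum-product contiguous subarray of
# [2, 3, -2, 4] is [2, 3] with product 6, and for [-2, 0, -1] it is [0].
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0
    print(max_product_subarray([2, 3, -2, 4]))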
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self, ints: Iterable[int]) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__( self) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)
    def __str__( self) -> str:
        """simple docstring"""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 21 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self : Union[str, Any] , length : int):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria(self : Union[str, Any] ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria(self : Tuple ):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria(self : str ):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria(self : Any ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria(self : Any ):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
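# How these criteria plug into generation -- a minimal sketch (our addition; the
# "gpt2" checkpoint is illustrative, any causal LM works):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   output_ids = model.generate(
#       **inputs,
#       stopping_criteria=StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)]),
#       max_length=50,
#   )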
| 343 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32
    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a(self):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 1_00
    @property
    def dummy_tokenizer(self):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=10_05, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.0_0_0_8_5,
            'beta_end': 0.0_1_2,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((2_56, 2_56))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds , zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image)
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 21 | 0 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
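# Worked examples (added for clarity): 25 = 0b11001 and 32 = 0b100000, so after
# zero-padding to a common width their OR is 0b111001.
#
#   >>> binary_or(25, 32)
#   '0b111001'
#   >>> binary_or(37, 50)
#   '0b110111'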
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # closed form for an arithmetic series: S = n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
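# Worked example (added for clarity): with first_term=1, common_diff=1 and
# num_of_terms=10 the formula gives S = 10/2 * (2*1 + 9*1) = 5 * 11 = 55.0,
# which is what main() below prints.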
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        '''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        '''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip('''ImageGPT requires clusters at initialization''')
    def test_init_without_params(self):
        '''simple docstring'''
        pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image_a = Image.open(dataset[4]["file"])
    image_b = Image.open(dataset[5]["file"])

    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_image(self):
        '''simple docstring'''
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
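    # Usage sketch (added; model class and checkpoint follow the public
    # ImageGPT docs and are not part of this test file): the color-clustered
    # `input_ids` produced above feed straight into the autoregressive model:
    #
    #   from transformers import ImageGPTForCausalImageModeling
    #   model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
    #   logits = model(encoding.input_ids).logits  # one distribution per pixel cluster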
| 209 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
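    # Usage sketch (added; the tokenizer call mirrors the standard MNLI recipe
    # and is an assumption, not part of this test): the raw `input_ids` above
    # encode a tokenized premise/hypothesis pair, e.g.:
    #
    #   from transformers import AutoTokenizer
    #   tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
    #   inputs = tokenizer("A soccer game.", "A sports event.", return_tensors="pt")
    #   logits = model(**inputs)[0]  # one logit per MNLI class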
| 21 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
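# Design note (added for clarity): padding dynamically to the longest sequence
# in each batch keeps GPU work proportional to the real token count, while the
# TPU branch pads to a fixed max_length because XLA recompiles the graph for
# every new input shape.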
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels'])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric['accuracy']

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
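# Launch sketch (added; the DeepSpeed config path is an assumption -- any setup
# produced by `accelerate config` works):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .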
| 29 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
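    # Reviewer note (added, not part of the original test): both checkpoints
    # return one token tensor per Jukebox level (three in total). As the
    # expected tensors above show, the 5b vocabulary shifts the lyric token
    # ids by one relative to 1b and pads the metadata block with -1 sentinels,
    # which is why the two tests need different reference outputs.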
| 21 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Solve rod cutting by exhaustively trying every first cut (exponential time)."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f'''n must be greater than or equal to 0. Got n = {n}''')
    if n > len(prices):
        raise ValueError(
            'Each integral piece of rod must have a corresponding price. '
            f'''Got n = {n} but length of prices = {len(prices)}'''
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
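# Complexity note (added for clarity): naive_cut_rod_recursive re-solves
# overlapping subproblems and is exponential in n, while the memoized top-down
# and iterative bottom-up variants both run in O(n^2) time with O(n) extra
# space for the max_rev table.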
if __name__ == "__main__":
    main()
| 212 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# TODO: is there an appropriate internal test set?
lowercase_ : int = """ssube/stable-diffusion-x4-upscaler-onnx"""
    def get_dummy_inputs(self, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : str = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[Any] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs()
_lowercase : List[str] = pipe(**lowerCamelCase).images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
    def gpu_provider(self):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options(self):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
# using the PNDM scheduler by default
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np', )
_lowercase : List[Any] = output.images
_lowercase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : List[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
_lowercase : str = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCamelCase, output_type='np', )
_lowercase : str = output.images
_lowercase : str = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
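    # Usage sketch (added; prompt and image are illustrative): outside the test
    # harness the upscaler is driven the same way -- a low-resolution PIL image
    # goes in, a 4x-upscaled array comes out:
    #
    #   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    #       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
    #   upscaled = pipe(prompt="a photo", image=small_image, output_type="np").images[0]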
| 21 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 329 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
"""simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
"""simple docstring"""
torch.manual_seed(0)
        model = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
return model
@property
    def dummy_vae(self):
"""simple docstring"""
torch.manual_seed(0)
        model = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
return model
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0)
        config = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=50_06, )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
"""simple docstring"""
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : Optional[Any] = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Tuple = 77
_lowercase : int = self.dummy_image.to(lowerCamelCase)
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Optional[int] = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : Dict = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Any = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, )
_lowercase : Optional[int] = output.images
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Optional[Any] = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, return_dict=lowerCamelCase, )[0]
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
_lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : str = self.dummy_vae
_lowercase : Optional[Any] = self.dummy_text_encoder
_lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Optional[Any] = 77
_lowercase : str = self.dummy_image.to(lowerCamelCase)
# put models in fp16
_lowercase : List[str] = unet.half()
_lowercase : List[Any] = vae.half()
_lowercase : Any = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Any = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = 'A painting of a squirrel eating a burger'
_lowercase : Optional[Any] = torch.manual_seed(0)
_lowercase : Union[str, Any] = alt_pipe(
[prompt], generator=lowerCamelCase, num_inference_steps=2, output_type='np', image=lowerCamelCase, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : str = init_image.resize((7_60, 5_04))
_lowercase : Optional[int] = 'BAAI/AltDiffusion'
_lowercase : str = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : List[str] = 'A fantasy landscape, trending on artstation'
_lowercase : Any = torch.manual_seed(0)
_lowercase : Dict = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : List[str] = output.images[0]
_lowercase : Tuple = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowercase : Optional[Any] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : str = init_image.resize((7_68, 5_12))
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
_lowercase : str = 'BAAI/AltDiffusion'
_lowercase : Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : int = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : int = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : Union[str, Any] = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
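    # Reviewer note (added, not part of the original test): for img2img
    # pipelines `strength` controls how much of the diffusion schedule is
    # applied to the noised init image -- strength=0.75 runs roughly the last
    # 75% of num_inference_steps, so higher values stray further from the
    # input image.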
| 21 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
'''simple docstring'''
lowerCAmelCase_ = ViTImageProcessor if is_vision_available() else None
@property
def snake_case__ ( self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = (3, 32, 1_28)
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case_ = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowercase ) + "\n" )
snake_case_ = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 1_28},
}
snake_case_ = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowercase , __lowercase )
def snake_case__ ( self : Any , **__lowercase : List[Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def snake_case__ ( self : int , **__lowercase : int ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
snake_case_ = Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) )
return image_input
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor.save_pretrained(self.tmpdirname )
snake_case_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case_ = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
snake_case_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(__lowercase , return_tensors="np" )
snake_case_ = processor(images=__lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = 'test'
snake_case_ = processor(text=__lowercase )
snake_case_ = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = 'test'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.char_decode(__lowercase )
snake_case_ = tokenizer.batch_decode(__lowercase )
snake_case_ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__lowercase , __lowercase )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = None
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=__lowercase , image_processor=__lowercase )
snake_case_ = torch.randn(1 , 27 , 38 )
snake_case_ = torch.randn(1 , 27 , 5_02_57 )
snake_case_ = torch.randn(1 , 27 , 3_05_22 )
snake_case_ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 187 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = """deformable_detr"""
lowercase_ : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=3, lowerCamelCase=3_00, lowerCamelCase=10_24, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase="resnet50", lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=False, lowerCamelCase=3_00, lowerCamelCase=False, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : List[str] = backbone_config.get('model_type')
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Optional[int] = config_class.from_dict(lowerCamelCase)
_lowercase : Tuple = use_timm_backbone
_lowercase : List[str] = backbone_config
_lowercase : Tuple = num_channels
_lowercase : Optional[Any] = num_queries
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Optional[int] = d_model
_lowercase : int = encoder_ffn_dim
_lowercase : List[Any] = encoder_layers
_lowercase : str = encoder_attention_heads
_lowercase : str = decoder_ffn_dim
_lowercase : Optional[Any] = decoder_layers
_lowercase : List[str] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Any = activation_function
_lowercase : Optional[int] = init_std
_lowercase : int = init_xavier_std
_lowercase : Union[str, Any] = encoder_layerdrop
_lowercase : Tuple = auxiliary_loss
_lowercase : Union[str, Any] = position_embedding_type
_lowercase : str = backbone
_lowercase : List[Any] = use_pretrained_backbone
_lowercase : Any = dilation
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : Dict = encoder_n_points
_lowercase : Dict = decoder_n_points
_lowercase : Dict = two_stage
_lowercase : Union[str, Any] = two_stage_num_proposals
_lowercase : str = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : Tuple = class_cost
_lowercase : int = bbox_cost
_lowercase : Optional[int] = giou_cost
# Loss coefficients
_lowercase : Optional[Any] = mask_loss_coefficient
_lowercase : Dict = dice_loss_coefficient
_lowercase : Tuple = bbox_loss_coefficient
_lowercase : Optional[int] = giou_loss_coefficient
_lowercase : Union[str, Any] = eos_coefficient
_lowercase : Union[str, Any] = focal_alpha
_lowercase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
_lowercase : Union[str, Any] = self.backbone_config.to_dict()
_lowercase : Tuple = self.__class__.model_type
return output
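# --- Illustrative sketch, not part of the original file ---
# The `attribute_map` above is what lets `config.hidden_size` resolve to
# `config.d_model`. A minimal, self-contained stand-in for that mechanism,
# assuming the real `PretrainedConfig` redirects unknown names via `__getattr__`:
class _AttributeMapSketch:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}
    def __init__(self, d_model=2_56, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
# _AttributeMapSketch().hidden_size == 2_56 and _AttributeMapSketch().num_attention_heads == 8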
| 21 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( _a , unittest.TestCase ):
"""simple docstring"""
_a = DDIMPipeline
_a = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_a = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
_a = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
UpperCamelCase__ :List[Any] = DDIMScheduler()
UpperCamelCase__ :str = {'unet': unet, 'scheduler': scheduler}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('''mps''' ):
UpperCamelCase__ :str = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase__ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = 'cpu'
UpperCamelCase__ :List[Any] = self.get_dummy_components()
UpperCamelCase__ :int = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Dict = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
UpperCamelCase__ :List[Any] = np.array(
[1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
UpperCamelCase__ :Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = 'google/ddpm-cifar10-32'
UpperCamelCase__ :Dict = UNetaDModel.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Tuple = DDIMScheduler()
UpperCamelCase__ :List[Any] = DDIMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
ddim.to(UpperCamelCase_ )
ddim.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = ddim(generator=UpperCamelCase_ , eta=0.0 , output_type='''numpy''' ).images
UpperCamelCase__ :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Optional[int] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = 'google/ddpm-ema-bedroom-256'
UpperCamelCase__ :Union[str, Any] = UNetaDModel.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = DDIMScheduler.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Tuple = DDIMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
ddpm.to(UpperCamelCase_ )
ddpm.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = ddpm(generator=UpperCamelCase_ , output_type='''numpy''' ).images
UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase__ :int = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[str] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
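# --- Illustrative sketch, not part of the original file ---
# `_LazyModule` defers the heavy submodule imports above until first attribute
# access. A minimal stand-in built on PEP 562 module-level `__getattr__`, where
# `_import_structure` is assumed to be the {submodule: [exported names]} dict
# built at the top of this file:
import importlib
def __getattr__(name):
    for _submodule, _symbols in _import_structure.items():
        if name in _symbols:
            _module = importlib.import_module("." + _submodule, __name__)
            return getattr(_module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")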
| 21 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A = logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase ( _a ):
"""simple docstring"""
__magic_name__ :int = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCAmelCase__ :Tuple = deprecated_arg[3:]
setattr(self , __UpperCAmelCase , not kwargs.pop(__UpperCAmelCase ) )
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}" )
lowerCAmelCase__ :Tuple = kwargs.pop('torchscript' , self.torchscript )
lowerCAmelCase__ :Optional[Any] = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
lowerCAmelCase__ :Dict = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**__UpperCAmelCase )
__magic_name__ :bool = field(default=_a , metadata={"""help""": """Trace the models using torchscript"""} )
__magic_name__ :bool = field(default=_a , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
__magic_name__ :str = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def snake_case ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
lowerCAmelCase__ :Union[str, Any] = torch.device('cpu' )
lowerCAmelCase__ :List[Any] = 0
elif is_torch_tpu_available():
lowerCAmelCase__ :int = xm.xla_device()
lowerCAmelCase__ :Any = 0
else:
lowerCAmelCase__ :Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ :str = torch.cuda.device_count()
return device, n_gpu
@property
def snake_case ( self ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def snake_case ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def snake_case ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def snake_case ( self ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def snake_case ( self ):
'''simple docstring'''
return self.n_gpu > 0
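# --- Illustrative sketch, not part of the original file ---
# The constructor above rewrites legacy negative flags: every "no_*" kwarg is
# popped and stored under its positive name, inverted. Isolated version:
def _rewrite_deprecated_args(kwargs, deprecated_args):
    rewritten = dict(kwargs)
    for deprecated_arg in deprecated_args:
        if deprecated_arg in rewritten:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            rewritten[positive_arg] = not rewritten.pop(deprecated_arg)
    return rewritten
# _rewrite_deprecated_args({"no_cuda": True}, ["no_cuda"]) == {"cuda": False}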
| 293 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_lowercase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase : Tuple = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : str = np.concatenate(lowerCamelCase_ , axis=0 )
_lowercase : Dict = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_55.0
_lowercase : Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowercase : str = 2.0 * image - 1.0
_lowercase : Tuple = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : Any = torch.cat(lowerCamelCase_ , dim=0 )
return image
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0.99_95 ) -> Tuple:
if not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : List[Any] = True
_lowercase : Any = va.device
_lowercase : Union[str, Any] = va.cpu().numpy()
_lowercase : int = va.cpu().numpy()
_lowercase : int = np.sum(va * va / (np.linalg.norm(lowerCamelCase_ ) * np.linalg.norm(lowerCamelCase_ )) )
if np.abs(lowerCamelCase_ ) > DOT_THRESHOLD:
_lowercase : Any = (1 - t) * va + t * va
else:
_lowercase : Dict = np.arccos(lowerCamelCase_ )
_lowercase : str = np.sin(lowerCamelCase_ )
_lowercase : int = theta_a * t
_lowercase : Dict = np.sin(lowerCamelCase_ )
_lowercase : Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowercase : List[Any] = sin_theta_t / sin_theta_a
_lowercase : Dict = sa * va + sa * va
if inputs_are_torch:
_lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
return va
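# --- Illustrative sketch, not part of the original file ---
# Reference NumPy version of the slerp above: spherical interpolation that
# falls back to plain lerp once the inputs are nearly parallel.
def _slerp_np(t, va, vb, DOT_THRESHOLD=0.9_9_9_5):
    dot = np.sum(va * vb / (np.linalg.norm(va) * np.linalg.norm(vb)))
    if np.abs(dot) > DOT_THRESHOLD:
        return (1 - t) * va + t * vb
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
# For orthogonal unit vectors the result stays on the unit circle:
# _slerp_np(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])) is approx. [0.7071, 0.7071]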
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
for param in model.parameters():
_lowercase : Any = value
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCamelCase, text_encoder=lowerCamelCase, clip_model=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, coca_model=lowerCamelCase, coca_tokenizer=lowerCamelCase, coca_transform=lowerCamelCase, )
_lowercase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size, lowerCamelCase)
else feature_extractor.size['shortest_edge']
)
_lowercase : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, lowerCamelCase)
set_requires_grad(self.clip_model, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = min(int(num_inference_steps * strength), lowerCamelCase)
_lowercase : List[Any] = max(num_inference_steps - init_timestep, 0)
_lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
if not isinstance(lowerCamelCase, torch.Tensor):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase)}''')
_lowercase : Any = image.to(device=lowerCamelCase, dtype=lowerCamelCase)
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(lowerCamelCase)
]
_lowercase : int = torch.cat(lowerCamelCase, dim=0)
else:
_lowercase : int = self.vae.encode(lowerCamelCase).latent_dist.sample(lowerCamelCase)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : str = 0.1_8_2_1_5 * init_latents
_lowercase : List[str] = init_latents.repeat_interleave(lowerCamelCase, dim=0)
_lowercase : List[str] = randn_tensor(init_latents.shape, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
# get latents
_lowercase : Any = self.scheduler.add_noise(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : str = init_latents
return latents
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = self.coca_transform(lowerCamelCase).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_lowercase : List[str] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
_lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Tuple = self.feature_extractor.preprocess(lowerCamelCase)
_lowercase : List[str] = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
_lowercase : int = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : int = image_embeddings_clip.repeat_interleave(lowerCamelCase, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = latents.detach().requires_grad_()
_lowercase : Union[str, Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Tuple = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowercase : Any = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : List[str] = torch.sqrt(lowerCamelCase)
_lowercase : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, lowerCamelCase):
_lowercase : Dict = self.scheduler.sigmas[index]
_lowercase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Dict = 1 / 0.1_8_2_1_5 * sample
_lowercase : Optional[Any] = self.vae.decode(lowerCamelCase).sample
_lowercase : int = (image / 2 + 0.5).clamp(0, 1)
_lowercase : Any = transforms.Resize(self.feature_extractor_size)(lowerCamelCase)
_lowercase : Optional[Any] = self.normalize(lowerCamelCase).to(latents.dtype)
_lowercase : List[str] = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : Optional[Any] = spherical_dist_loss(lowerCamelCase, lowerCamelCase).mean() * clip_guidance_scale
_lowercase : str = -torch.autograd.grad(lowerCamelCase, lowerCamelCase)[0]
if isinstance(self.scheduler, lowerCamelCase):
_lowercase : Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowercase : List[str] = noise_pred_original
else:
_lowercase : List[Any] = noise_pred_original - torch.sqrt(lowerCamelCase) * grads
return noise_pred, latents
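    # --- Illustrative note, not part of the original file ---
    # The predicted x_0 in cond_fn above follows eq. (12) of the DDIM paper:
    #     x_0 = (x_t - sqrt(1 - alpha_prod_t) * eps) / sqrt(alpha_prod_t)
    # with alpha_prod_t = alphas_cumprod[t] and beta_prod_t = 1 - alpha_prod_t.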
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 0.6, lowerCamelCase = 50, lowerCamelCase = 7.5, lowerCamelCase = 1, lowerCamelCase = 0.0, lowerCamelCase = 1_00, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, lowerCamelCase = 0.8, lowerCamelCase = 0.1, lowerCamelCase = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Dict = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
_lowercase : Dict = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Dict = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Optional[Any] = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : List[Any] = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Any = 1 / 0.1_8_2_1_5 * latents
_lowercase : List[str] = self.vae.decode(lowerCamelCase).sample
_lowercase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
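# --- Illustrative sketch, not part of the original file ---
# The classifier-free guidance step from `__call__` above, isolated: the batched
# prediction holds the unconditional half first, then the text-conditioned half;
# guidance_scale == 1.0 reduces to the plain text-conditioned prediction.
def _cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)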
| 21 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) ->Tuple:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase__ : Tuple =flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : int =torch.permute(lowerCamelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_ ):
# linear layer
lowerCamelCase__ : Any =flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : int =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : List[str] =flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : int ) ->List[str]:
if "metadata" in layer:
lowerCamelCase__ : Any =layer.split('metadata' )
lowerCamelCase__ : Union[str, Any] =''.join(split_layer[0] )[:-1]
lowerCamelCase__ : List[str] =[tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
lowerCamelCase__ : str =layer.split('kvstore' )
lowerCamelCase__ : Union[str, Any] =''.join(split_layer[0] )[:-1]
lowerCamelCase__ : Optional[Any] =[tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
lowerCamelCase__ : int =layer.split('/' )
lowerCamelCase__ : Tuple ='/'.join(split_layer[:-1] )
lowerCamelCase__ : Optional[Any] =(split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase__ : str =f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
lowerCamelCase__ : List[str] ='file'
else:
lowerCamelCase__ : Optional[int] =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : int ) ->Dict:
lowerCamelCase__ : Tuple =rename_keys(lowerCamelCase_ )
lowerCamelCase__ : List[str] ={}
for k, v in current_block.items():
lowerCamelCase__ : Dict =v
lowerCamelCase__ : Any =new_current_block
torch.save(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple = WEIGHTS_NAME ) ->Dict:
lowerCamelCase__ : Optional[Any] =convert_file_size_to_int(lowerCamelCase_ )
lowerCamelCase__ : Any =[]
lowerCamelCase__ : Optional[Any] ={}
lowerCamelCase__ : Any =0
lowerCamelCase__ : str =0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
lowerCamelCase__ : Union[str, Any] =serialization.msgpack_restore(fp.read() )['optimizer']['target']
lowerCamelCase__ : Optional[int] =flatten_dict(lowerCamelCase_ , sep='/' )
lowerCamelCase__ : str ={}
for layer in checkpoint_info.keys():
lowerCamelCase__ : List[Any] =get_key_and_tensorstore_dict(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : List[Any] =content
else:
lowerCamelCase__ : Dict ={split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : Dict =ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : Tuple =torch.tensor(lowerCamelCase_ )
lowerCamelCase__ : Dict =raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ : str =rename_base_flax_keys(tuple(key.split('/' ) ) , lowerCamelCase_ )
lowerCamelCase__ : Any ='/'.join(lowerCamelCase_ )
        # If this weight would tip the current block over the maximal shard size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase_ , weights_name.replace('.bin' , f"""-{len(lowerCamelCase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : Any ={}
lowerCamelCase__ : str =0
lowerCamelCase__ : List[str] =raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : List[Any] =os.path.join(lowerCamelCase_ , weights_name.replace('.bin' , f"""-{len(lowerCamelCase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCamelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : int ={}
lowerCamelCase__ : Optional[Any] ={}
for idx, shard in enumerate(lowerCamelCase_ ):
lowerCamelCase__ : str =weights_name.replace(
'.bin' , f"""-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
lowerCamelCase__ : List[str] =os.path.join(lowerCamelCase_ , weights_name.replace('.bin' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCamelCase__ : int =shard
for key in shard:
lowerCamelCase__ : Optional[int] =shard_file
# Add the metadata
lowerCamelCase__ : Any ={'total_size': total_size}
lowerCamelCase__ : Tuple ={'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : Tuple =json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '\n'
f.write(lowerCamelCase_ )
return metadata, index
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCAmelCase_ ( ) ->str:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : Optional[Any] =SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
lowerCamelCase__ : str =SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
lowerCamelCase__ : str =TaTokenizer.from_pretrained('t5-small' )
lowerCamelCase__ : Tuple ='A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
lowerCamelCase__ : Optional[int] =tokenizer(lowerCamelCase_ , return_tensors='pt' ).input_ids
lowerCamelCase__ : Tuple =model.generate(lowerCamelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 126 |
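# --- Illustrative sketch, not part of the original file ---
# Size accounting used by the sharding loop above: each tensor contributes
# numel * bytes-per-element, accumulated against the parsed max shard size.
def _tensor_nbytes(tensor):
    # e.g. a (1_000, 1_000) float32 tensor adds 4_000_000 bytes to the shard
    return tensor.numel() * dtype_byte_size(tensor.dtype)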
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = ConsistencyModelPipeline
lowercase_ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ : List[str] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet', )
return unet
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
return unet
def UpperCamelCase ( self, lowerCamelCase=False) -> Dict:
"""simple docstring"""
if class_cond:
_lowercase : Union[str, Any] = self.dummy_cond_unet
else:
_lowercase : Union[str, Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Tuple:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : str = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Dict = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : int = image[0, -3:, -3:, -1]
_lowercase : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Any = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Any = 0
_lowercase : List[str] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.get_dummy_components()
_lowercase : Optional[Any] = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : List[str] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Union[str, Any] = 1
_lowercase : Tuple = None
_lowercase : Tuple = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Dict = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Optional[Any] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Tuple = 1
_lowercase : int = None
_lowercase : Tuple = 0
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase=False, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
_lowercase : str = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_lowercase : Optional[Any] = self.get_fixed_latents(seed=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase, shape=lowerCamelCase)
_lowercase : Tuple = latents
return inputs
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Any:
"""simple docstring"""
if type(lowerCamelCase) == str:
_lowercase : Union[str, Any] = torch.device(lowerCamelCase)
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[str] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
return latents
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Any = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : Optional[Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs()
_lowercase : int = 1
_lowercase : Optional[Any] = None
_lowercase : str = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[int] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : int = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
_lowercase : int = 1
_lowercase : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : int = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
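# Outside the test harness, the same pipeline can be driven in a few lines.
# This is a hedged sketch, not part of the suite: the model id and scheduler
# settings are copied from the tests above; the device handling and the seed
# are assumptions.
import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# num_inference_steps=1 exercises single-step (distilled) generation,
# matching the one-step test above.
image = pipe(num_inference_steps=1, generator=torch.manual_seed(0)).images[0]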
| 21 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_SCREAMING_SNAKE_CASE = "src/transformers"
_SCREAMING_SNAKE_CASE = "docs/source/en"
_SCREAMING_SNAKE_CASE = "."
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase = f.readlines()
# Find the start prompt.
UpperCamelCase = 0
while not lines[start_index].startswith(lowerCamelCase_ ):
start_index += 1
start_index += 1
UpperCamelCase = start_index
while not lines[end_index].startswith(lowerCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SCREAMING_SNAKE_CASE = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_SCREAMING_SNAKE_CASE = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_SCREAMING_SNAKE_CASE = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_SCREAMING_SNAKE_CASE = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase( UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCamelCase_ )
return [m.group(0 ) for m in matches]
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = 2 if text == '✅' or text == '❌' else len(lowerCamelCase_ )
UpperCamelCase = (width - text_length) // 2
UpperCamelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
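# For reference, a self-contained restatement of the two helpers above with
# readable names; the obfuscated originals behave the same way.
import re

def _camel_case_split(identifier: str) -> list:
    # "FlaxBertModel" -> ["Flax", "Bert", "Model"]
    matches = re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

def _center_text_demo(text: str, width: int) -> str:
    # The check-mark emojis render two columns wide, hence the special case.
    text_length = 2 if text in ("✅", "❌") else len(text)
    left_indent = (width - text_length) // 2
    return " " * left_indent + text + " " * (width - text_length - left_indent)

assert _camel_case_split("FlaxBertModel") == ["Flax", "Bert", "Model"]
assert _center_text_demo("ok", 6) == "  ok  "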
def lowercase( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCamelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
UpperCamelCase = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
UpperCamelCase = collections.defaultdict(lowerCamelCase_ )
UpperCamelCase = collections.defaultdict(lowerCamelCase_ )
UpperCamelCase = collections.defaultdict(lowerCamelCase_ )
UpperCamelCase = collections.defaultdict(lowerCamelCase_ )
UpperCamelCase = collections.defaultdict(lowerCamelCase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase_ ):
UpperCamelCase = None
if attr_name.endswith("""Tokenizer""" ):
UpperCamelCase = slow_tokenizers
UpperCamelCase = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
UpperCamelCase = fast_tokenizers
UpperCamelCase = attr_name[:-13]
elif _re_tf_models.match(lowerCamelCase_ ) is not None:
UpperCamelCase = tf_models
UpperCamelCase = _re_tf_models.match(lowerCamelCase_ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase_ ) is not None:
UpperCamelCase = flax_models
UpperCamelCase = _re_flax_models.match(lowerCamelCase_ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase_ ) is not None:
UpperCamelCase = pt_models
UpperCamelCase = _re_pt_models.match(lowerCamelCase_ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
UpperCamelCase = True
break
# Try again after removing the last word in the name
UpperCamelCase = ''.join(camel_case_split(lowerCamelCase_ )[:-1] )
# Let's build that table!
UpperCamelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
UpperCamelCase = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
UpperCamelCase = [len(lowerCamelCase_ ) + 2 for c in columns]
UpperCamelCase = max([len(lowerCamelCase_ ) for name in model_names] ) + 2
# Build the table per se
UpperCamelCase = '|' + '|'.join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for c, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + '|\n'
    # Use ":-----:" format to center-align table cell text
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
UpperCamelCase = {True: '✅', False: '❌'}
for name in model_names:
UpperCamelCase = model_name_to_prefix[name]
UpperCamelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for l, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + "|\n"
return table
def lowercase( UpperCamelCase_=False ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = _find_text_in_file(
filename=os.path.join(lowerCamelCase_ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
UpperCamelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCamelCase_ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 343 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[int, int]:
_lowercase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_lowercase : int = x_den * y_den * z_den
_lowercase : int = gcd(lowerCamelCase_ , lowerCamelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
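# A quick worked example of the reducing helper above, restated with readable
# names (the arithmetic is identical to the obfuscated version):
from math import gcd as _gcd

def _add_three_demo(x_num, x_den, y_num, y_den, z_num, z_den):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = _gcd(top, bottom)
    return top // hcf, bottom // hcf

# 1/2 + 1/3 + 1/6 reduces to 1/1.
assert _add_three_demo(1, 2, 1, 3, 1, 6) == (1, 1)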
def UpperCamelCase_( lowerCamelCase_ = 35 ) -> int:
_lowercase : set = set()
_lowercase : int
_lowercase : Fraction = Fraction(0 )
_lowercase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_lowercase : int = x_num * y_den + x_den * y_num
_lowercase : int = x_den * y_den
_lowercase : str = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : List[Any] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=2
_lowercase : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_lowercase : List[Any] = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : int = int(sqrt(lowerCamelCase_ ) )
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Optional[int] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-1
_lowercase : Any = x_num * y_num
_lowercase : str = x_den * y_num + x_num * y_den
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : int = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
                    # n=-2
_lowercase : str = x_num * x_num * y_num * y_num
_lowercase : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : List[str] = int(sqrt(lowerCamelCase_ ) )
_lowercase : Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Tuple = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _SCREAMING_SNAKE_CASE ( ) -> Any:
__A : Dict = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=lowerCamelCase_ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=lowerCamelCase_ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=lowerCamelCase_ )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE ( ) -> int:
__A : List[str] = parse_args()
# Import training_script as a module.
__A : int = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : Optional[int] = importlib.import_module(lowerCamelCase_ )
# Patch sys.argv
__A : List[Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
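# The launcher above works by rewriting sys.argv before handing control to
# the training script, so that script parses the patched arguments as if it
# had been invoked directly. A minimal illustration of that pattern using
# only the standard library (the script name and flags are hypothetical):
import os
import runpy
import sys as _sys

_saved_argv = _sys.argv
try:
    _sys.argv = ["train.py", "--epochs", "3"]  # what the target script will see
    if os.path.exists("train.py"):  # hypothetical script; skipped if absent
        runpy.run_path("train.py", run_name="__main__")
finally:
    _sys.argv = _saved_argv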
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
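# `_LazyModule` swaps itself in for the package so the heavy submodules above
# are imported only when an attribute is first accessed. A minimal sketch of
# the same idea using the standard library (not the actual implementation):
import importlib as _importlib
from types import ModuleType as _ModuleType

class _LazyModuleSketch(_ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(_importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# Usage mirrors the assignment above:
#   sys.modules[__name__] = _LazyModuleSketch(__name__, _import_structure)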
| 21 | 0 |
"""simple docstring"""
from math import factorial
_UpperCAmelCase = {str(d): factorial(d) for d in range(1_0)}
def __magic_name__ ( lowercase ):
return sum(DIGIT_FACTORIAL[d] for d in str(lowerCamelCase_ ) )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: str =7 * factorial(9 ) + 1
return sum(i for i in range(3 , lowerCamelCase_ ) if sum_of_digit_factorial(lowerCamelCase_ ) == i )
if __name__ == "__main__":
print(f"""{solution() = }""")
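# The two "curious" numbers this search finds are 145 and 40585. A quick
# check of the digit-factorial sum, restated with readable names:
from math import factorial as _factorial

def _sum_of_digit_factorial(n: int) -> int:
    return sum(_factorial(int(d)) for d in str(n))

# 1! + 4! + 5! = 1 + 24 + 120 = 145
assert _sum_of_digit_factorial(145) == 145
assert _sum_of_digit_factorial(40585) == 40585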
| 173 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
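# Restated with readable names, the check above is the polygon inequality:
# the longest side must be strictly shorter than the sum of all the others.
def _can_form_polygon(sides: list) -> bool:
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert _can_form_polygon([6, 10, 5]) is True       # valid triangle
assert _can_form_polygon([3, 7, 13, 2]) is False   # 13 >= 3 + 7 + 2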
| 21 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __A ( _a ):
'''simple docstring'''
lowerCAmelCase_ = """openai/whisper-base"""
lowerCAmelCase_ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowerCAmelCase_ = """transcriber"""
lowerCAmelCase_ = WhisperProcessor
lowerCAmelCase_ = WhisperForConditionalGeneration
lowerCAmelCase_ = ["""audio"""]
lowerCAmelCase_ = ["""text"""]
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.pre_processor(__lowerCAmelCase , return_tensors='''pt''' ).input_features
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.model.generate(inputs=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.pre_processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )[0]
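# Outside the tool wrapper, the same encode -> generate -> decode flow looks
# like this (a sketch: the silent audio stand-in and the 16 kHz sampling rate
# are assumptions; the three calls mirror the methods above):
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
predicted_ids = model.generate(inputs=inputs)
text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]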
| 209 |
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCamelCase_( lowerCamelCase_ = 200_0000 ) -> int:
_lowercase : list[int] = [0]
_lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
_lowercase : int = 0
# an estimate of b, using the quadratic formula
_lowercase : float
# the largest integer less than b_estimate
_lowercase : int
    # the smallest integer greater than b_estimate
_lowercase : int
# the triangle number corresponding to b_floor
_lowercase : int
# the triangle number corresponding to b_ceil
_lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_lowercase : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_lowercase : List[str] = floor(lowerCamelCase_ )
_lowercase : Dict = ceil(lowerCamelCase_ )
_lowercase : List[str] = triangle_numbers[b_floor]
_lowercase : List[str] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Union[str, Any] = triangle_b_first_guess * triangle_a
_lowercase : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Any = triangle_b_second_guess * triangle_a
_lowercase : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"{solution() = }")
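# The search above leans on the identity behind Project Euler 85: an a x b
# grid contains T(a) * T(b) axis-aligned rectangles, where T(n) = n*(n+1)/2
# is the n-th triangle number. A quick self-contained check:
def _triangle(n: int) -> int:
    return n * (n + 1) // 2

def _count_rectangles(a: int, b: int) -> int:
    return _triangle(a) * _triangle(b)

# The problem statement's example: a 3 x 2 grid contains 18 rectangles.
assert _count_rectangles(3, 2) == 18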
| 21 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
UpperCAmelCase_ : List[str] = len(_UpperCamelCase ) - 1
def __UpperCAmelCase ( self , _UpperCamelCase ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase_ : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _UpperCamelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_UpperCamelCase ) , 5 ) == 1
return output_values
def __UpperCAmelCase ( self , _UpperCamelCase ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase_ : Optional[int] = self.basis_function(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = 0.0
UpperCAmelCase_ : Optional[Any] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __UpperCAmelCase ( self , _UpperCamelCase = 0.01 ) -> int:
from matplotlib import pyplot as plt # type: ignore
UpperCAmelCase_ : list[float] = [] # x coordinates of points to plot
UpperCAmelCase_ : list[float] = [] # y coordinates of points to plot
UpperCAmelCase_ : str = 0.0
while t <= 1:
UpperCAmelCase_ : List[Any] = self.bezier_curve_function(_UpperCamelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
UpperCAmelCase_ : List[str] = [i[0] for i in self.list_of_points]
UpperCAmelCase_ : Dict = [i[1] for i in self.list_of_points]
plt.plot(
_UpperCamelCase , _UpperCamelCase , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
plt.scatter(_UpperCamelCase , _UpperCamelCase , color='red' , label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
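# For reference, the basis implemented in `basis_function` above is the
# Bernstein polynomial, and the curve is a convex combination of the control
# points (in LaTeX):
#
#   b_{i,n}(t) = \binom{n}{i} (1 - t)^{n-i} t^{i}
#   B(t) = \sum_{i=0}^{n} b_{i,n}(t) P_i,   t \in [0, 1]
#
# Since \sum_{i=0}^{n} b_{i,n}(t) = 1 for every t, the assertion inside
# `basis_function` is exactly this partition-of-unity property.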
| 29 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _lowerCamelCase:
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = np.abs((a - b)).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Any = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase)
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : Tuple = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : str = after_output[0]
_lowercase : Optional[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : List[Any] = 13
_lowercase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModelTester(self)
_lowercase : Any = FlaxBertModelTester(self)
_lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
_lowercase : Any = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : Tuple = 13
_lowercase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Any = random_attention_mask([batch_size, 4])
_lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = FlaxCLIPVisionModelTester(self)
_lowercase : Union[str, Any] = FlaxBertModelTester(self)
_lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
_lowercase : str = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
_lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_lowercase : List[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
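# A sketch of the same zero-shot comparison outside the test harness: the
# logits are temperature-scaled similarity scores, so a softmax over them
# yields per-caption probabilities. The model id, captions, and image path
# are copied from the test above; the softmax step is an addition.
import numpy as np
from PIL import Image
from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(
    text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
)
logits = np.asarray(model(**inputs).logits_per_image)[0]
probs = np.exp(logits - logits.max()) / np.exp(logits - logits.max()).sum()  # stable softmax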
| 21 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCamelCase__ = logging.getLogger(__name__)
@dataclass
class A__ :
lowercase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase = field(
default=_a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowercase = field(
default=_a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowercase = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowercase = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class A__ :
lowercase = field(
default=_a , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase = field(
default=_a , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowercase = field(
default=_a , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowercase = field(
default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase = field(
default=_a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase = field(
default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase = field(
default=_a , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowercase = field(
default=_a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowercase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase = field(
default=_a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase = field(
default=_a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
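# Both dataclasses above (together with TrainingArguments) flow through
# HfArgumentParser, which turns dataclass fields into command-line flags.
# A minimal self-contained sketch of that pattern (the demo dataclass is
# hypothetical, not the script's own):
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class _DemoArguments:
    language: str = field(default="en", metadata={"help": "Evaluation language."})
    max_seq_length: int = field(default=128, metadata={"help": "Maximum input length."})

_parser = HfArgumentParser(_DemoArguments)
(_demo_args,) = _parser.parse_args_into_dataclasses(args=["--language", "de"])
assert _demo_args.language == "de" and _demo_args.max_seq_length == 128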
def lowerCAmelCase__ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCAmelCase__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase__ : str = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase__ : List[str] = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : str = train_dataset.features['label'].names
if training_args.do_eval:
lowerCAmelCase__ : Optional[int] = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : List[Any] = eval_dataset.features['label'].names
if training_args.do_predict:
lowerCAmelCase__ : int = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : str = predict_dataset.features['label'].names
# Labels
lowerCAmelCase__ : Dict = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , idalabel={str(lowerCamelCase_ ): label for i, label in enumerate(lowerCamelCase_ )} , labelaid={label: i for i, label in enumerate(lowerCamelCase_ )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ : List[Any] = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ : Optional[Any] = False
def preprocess_function(SCREAMING_SNAKE_CASE_ ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=lowerCamelCase_ , max_length=data_args.max_seq_length , truncation=lowerCamelCase_ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ : List[Any] = min(len(lowerCamelCase_ ) , data_args.max_train_samples )
lowerCAmelCase__ : str = train_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase__ : Optional[Any] = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ : Optional[Any] = min(len(lowerCamelCase_ ) , data_args.max_eval_samples )
lowerCAmelCase__ : Optional[int] = eval_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase__ : int = eval_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase__ : int = min(len(lowerCamelCase_ ) , data_args.max_predict_samples )
lowerCAmelCase__ : str = predict_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
lowerCAmelCase__ : Tuple = predict_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
lowerCAmelCase__ : List[Any] = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[str] = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
lowerCAmelCase__ : List[Any] = np.argmax(lowerCamelCase_ , axis=1 )
return metric.compute(predictions=lowerCamelCase_ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ : str = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ : Optional[int] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ : Any = None
# Initialize our Trainer
lowerCAmelCase__ : str = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
lowerCAmelCase__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ : int = last_checkpoint
lowerCAmelCase__ : List[str] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
lowerCAmelCase__ : Tuple = train_result.metrics
lowerCAmelCase__ : List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
lowerCAmelCase__ : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ : List[Any] = trainer.evaluate(eval_dataset=lowerCamelCase_ )
lowerCAmelCase__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
lowerCAmelCase__ : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCAmelCase__ : List[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' )
lowerCAmelCase__ : Tuple = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase_ )
)
lowerCAmelCase__ : Union[str, Any] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('predict' , lowerCamelCase_ )
trainer.save_metrics('predict' , lowerCamelCase_ )
lowerCAmelCase__ : List[str] = np.argmax(lowerCamelCase_ , axis=1 )
lowerCAmelCase__ : Optional[Any] = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ : Union[str, Any] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
    main()
| 212 |
import random
from typing import Any
def UpperCamelCase_( lowerCamelCase_ ) -> list[Any]:
for _ in range(len(lowerCamelCase_ ) ):
_lowercase : Optional[int] = random.randint(0 , len(lowerCamelCase_ ) - 1 )
_lowercase : str = random.randint(0 , len(lowerCamelCase_ ) - 1 )
_lowercase , _lowercase : Optional[int] = data[b], data[a]
return data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE : int = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
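# Note: the loop above swaps two *independently* chosen indices on each pass,
# which is not the textbook Fisher-Yates algorithm and does not make every
# permutation equally likely. The classic (Durstenfeld) form swaps position
# i with a uniformly chosen j <= i -- a sketch:
import random

def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # j is uniform over [0, i], inclusive
        data[i], data[j] = data[j], data[i]
    return data

print("Unbiased FY Shuffle", fisher_yates_shuffle_unbiased([0, 1, 2, 3, 4, 5, 6, 7]))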
| 21 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase__ ( a__: Tuple ) -> float:
'''simple docstring'''
return np.dot(lowerCamelCase_ , lowerCamelCase_ )
class __a :
def __init__( self , *,
_SCREAMING_SNAKE_CASE = np.inf , _SCREAMING_SNAKE_CASE = "linear" , _SCREAMING_SNAKE_CASE = 0.0 , ) -> None:
"""simple docstring"""
_UpperCAmelCase = regularization
_UpperCAmelCase = gamma
if kernel == "linear":
_UpperCAmelCase = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
_UpperCAmelCase = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
_UpperCAmelCase = f'''Unknown kernel: {kernel}'''
raise ValueError(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
return np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = observations
_UpperCAmelCase = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.regularization >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(_UpperCAmelCase ) = np.shape(_SCREAMING_SNAKE_CASE )
def to_minimize(_SCREAMING_SNAKE_CASE ) -> float:
_UpperCAmelCase = 0
(_UpperCAmelCase ) = np.shape(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = LinearConstraint(_SCREAMING_SNAKE_CASE , 0 , 0 )
_UpperCAmelCase = Bounds(0 , self.regularization )
_UpperCAmelCase = minimize(
_SCREAMING_SNAKE_CASE , np.ones(_SCREAMING_SNAKE_CASE ) , bounds=_SCREAMING_SNAKE_CASE , constraints=[ly_contraint] ).x
_UpperCAmelCase = l_star
# calculating mean offset of separation plane to points
_UpperCAmelCase = 0
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_UpperCAmelCase = s / n
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_UpperCAmelCase = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _SCREAMING_SNAKE_CASE )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
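# For reference, the objective handed to scipy's `minimize` inside `fit` is
# the negated Wolfe dual spelled out in the comments there (in LaTeX):
#
#   \max_{\lambda}  \sum_n \lambda_n
#       - \frac{1}{2} \sum_n \sum_m \lambda_n \lambda_m y_n y_m K(x_n, x_m)
#   \text{s.t.}  0 \le \lambda_n \le C,  \qquad  \sum_n \lambda_n y_n = 0
#
# with C = self.regularization. The weight vector never materialises
# explicitly: predictions use w . x = \sum_n \lambda_n y_n K(x_n, x),
# which is why only `l_star` and the offset b are stored.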
| 329 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'width_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
        """simple docstring"""
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
        """simple docstring"""
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
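# A minimal sketch of the `make_divisible` helper the model tester above relies
# on; this follows the standard MobileNet-style rounding rule and is an
# assumption about the implementation, not a copy of the library source.
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # never round down by more than 10% of the requested value
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


assert make_divisible_sketch(512 * 0.25, divisor=8) == 128  # the width_multiplier=0.25 case above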
| 21 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, top_k=None):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 187 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE : str = "bart"
SCREAMING_SNAKE_CASE : Optional[int] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        s2s_model.load_state_dict(save_dict['model'] )
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
passages , gpu_dense_index , es_client = load_indexes()
qar_tokenizer , qar_model , s2s_tokenizer , s2s_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Dict = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _ : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _ : None),
    } )
def answer_question(
    question_doc , s2s_model , s2s_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , s2s_model , s2s_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Optional[int] = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
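# A self-contained sketch of the dense-retrieval step described in the sidebar
# text above: max-inner-product search over precomputed passage embeddings
# with a flat faiss index. The random arrays stand in for real embeddings.
def _mips_demo():
    passage_reps = np.random.rand(1000, 128).astype("float32")
    mips_index = faiss.IndexFlatIP(128)
    mips_index.add(passage_reps)
    query_rep = np.random.rand(1, 128).astype("float32")
    scores, ids = mips_index.search(query_rep, 10)  # top-10 passages by inner product
    return scores, ids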
| 21 | 0 |
'''simple docstring'''
class Graph:
    """simple docstring"""
    def __init__( self ):
        '''simple docstring'''
        self.vertex = {}

    def print_graph( self ):
        '''simple docstring'''
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )

    def add_edge( self , from_vertex , to_vertex ):
        '''simple docstring'''
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs( self ):
        '''simple docstring'''
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )

    def dfs_recursive( self , start_vertex , visited ):
        '''simple docstring'''
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
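    # An equivalent iterative traversal, shown as a sketch rather than part of
    # the class above: an explicit stack replaces the recursion, avoiding
    # Python's recursion limit on deep graphs. The recursive driver's expected
    # output is listed in the comments that follow.
    def dfs_iterative(graph, start):
        visited, order, stack = set(), [], [start]
        while stack:
            vertex = stack.pop()
            if vertex not in visited:
                visited.add(vertex)
                order.append(vertex)
                # push neighbours in reverse so they pop in insertion order
                stack.extend(reversed(graph.get(vertex, [])))
        return order

    print()
    print('Iterative DFS:', dfs_iterative(g.vertex, 0))  # [0, 1, 2, 3]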
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 97 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F'''There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.''')
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 21 | 0 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A = True
except ImportError:
__A = False
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
'''simple docstring'''
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F"{directory}/configuration.json" )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , 'w' ):
pass
shutil.move(
F"{directory}/__init__.py" , F"{model_dir}/__init__.py" , )
shutil.move(
F"{directory}/configuration_{lowercase_model_name}.py" , F"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_{lowercase_model_name}.py" , F"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_tf_{lowercase_model_name}.py" , F"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_tf_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_flax_{lowercase_model_name}.py" , F"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_flax_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/{lowercase_model_name}.md" , F"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
F"{directory}/tokenization_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/tokenization_fast_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fd , abs_path = mkstemp()
            line_found = False
            with fdopen(fd , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F"Line {line_to_copy_below} was not found in file." )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )

        replace_in_files(F"{directory}/to_replace_{lowercase_model_name}.py" )
        os.rmdir(directory )
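# Example invocations of the command registered above, using only the flags it
# defines (--testing, --testing_file, --path); the fixture path shown is
# illustrative, not a file shipped with the repository:
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file my_model_config.json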
| 293 |
def max_product_subarray(numbers ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
return max_prod
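# Quick sanity checks for the routine above (maximum product over contiguous
# subarrays); the expected values follow directly from the definition.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6    # subarray [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0      # subarray [0]
    assert max_product_subarray([-2, -3, 4]) == 24     # subarray [-2, -3, 4]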
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 126 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self, ints) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
def __iter__( self) -> Iterator[int]:
"""simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self) -> int:
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self) -> str:
"""simple docstring"""
return " -> ".join([str(lowerCamelCase) for node in self])
def merge_lists(sll_one , sll_two ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
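    # The linked lists iterate in ascending order, so the same merge can also
    # be done lazily with the standard library instead of rebuilding a list:
    import heapq

    print(list(heapq.merge(SSL(test_data_odd), SSL(test_data_even))))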
| 21 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config( self ):
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
    def test_resize_token_embeddings(self):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        """simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
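# A condensed, hedged sketch of the left-padding behaviour the batch test above
# relies on: a decoder-only model continues from the last position, so batched
# prompts must be padded on the left. The model id matches the tests; the exact
# generated text depends on the checkpoint.
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = TFAutoModelForCausalLM.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"  # keep real tokens flush with the generation start
batch = tokenizer(["Hello, my dog is a little", "Hi"], return_tensors="tf", padding=True)
generated = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=12)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))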
| 343 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def block_out_channels_0(self):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
"""simple docstring"""
return 1_00
@property
    def dummy_tokenizer(self):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
"""simple docstring"""
torch.manual_seed(0)
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
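# Minimal usage sketch of the two-stage flow the slow test exercises: the prior
# maps text to image embeddings, the img2img pipeline then denoises from the
# init image. Checkpoints are the ones used above; prompt and GPU use are
# illustrative.
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda")
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
frog = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds, strength=0.2, output_type="np").images[0]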
| 21 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
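# Usage sketch for the solver above; the 3x3 system is illustrative and chosen
# to be strictly diagonally dominant so the validation passes.
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))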
| 280 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
requires_backends(self, 'vision')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
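# The class above is what the high-level factory returns for this task; a
# minimal usage sketch (model id and image path are illustrative):
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier("path/to/image.jpg", top_k=3)
# -> [{"score": ..., "label": ...}, ...] in the shape produced by postprocess() above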
| 21 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__( self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1E-12, onnx_export: bool = False, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_window
SCREAMING_SNAKE_CASE_: Union[str, Any] =sep_token_id
SCREAMING_SNAKE_CASE_: int =bos_token_id
SCREAMING_SNAKE_CASE_: Tuple =eos_token_id
SCREAMING_SNAKE_CASE_: List[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE_: int =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Dict =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_: int =type_vocab_size
SCREAMING_SNAKE_CASE_: str =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "Optional[List[PatchingSpec]]" = None) -> None:
        '''simple docstring'''
        super().__init__(config, task, patching_specs)
        self._config.onnx_export = True
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: 'batch'}
        return outputs
@property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1E-4
@property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["""input_ids"""])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
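# The dummy global_attention_mask generated above mirrors what callers pass at
# inference time to mark globally attending tokens; a minimal sketch using the
# base checkpoint from the archive map (input sentence is illustrative):
import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
encoded = tokenizer("Long documents benefit from sparse attention.", return_tensors="pt")
global_attention_mask = torch.zeros_like(encoded["input_ids"])
global_attention_mask[:, 0] = 1  # give the first token global attention
outputs = model(**encoded, global_attention_mask=global_attention_mask)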
| 173 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
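# Sanity check of the closed form S = (n / 2) * (2a + (n - 1)d):
# with a = 1, d = 1, n = 10 the sum is 5 * 11 = 55.0, matching main().
assert sum_of_series(1, 1, 10) == 55.0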
| 21 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    '''simple docstring'''
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
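    # The same consolidation can be driven directly from Python; the component
    # checkpoints below are the canonical RAG parts and are illustrative here:
    # consolidate(
    #     "rag_sequence",
    #     "facebook/bart-large",  # generator
    #     "facebook/dpr-question_encoder-single-nq-base",  # question encoder
    #     Path("./rag-sequence-consolidated"),
    # )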
| 209 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
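# The MNLI head checked above can be exercised end to end with the matching
# tokenizer; a minimal sketch (premise/hypothesis pair is illustrative):
import torch
from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
encoded = tokenizer("A soccer game.", "Some people are playing a sport.", return_tensors="pt")
with torch.no_grad():
    logits = model(**encoded).logits
print(model.config.id2label[int(logits.argmax(-1))])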
| 21 | 0 |
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
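# Cross-check against the small case from the problem statement:
# for 2 <= a <= 5 and 2 <= b <= 5 there are 15 distinct powers a**b
# (16 raw values, with 2**4 == 4**2 counted once).
assert solution(5) == 15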
| 29 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 212 |
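# The _LazyModule registered above makes attribute access, not import, pay the
# framework cost: `import transformers` stays cheap, and the first touch of a
# Speech2Text symbol triggers the real submodule import. A minimal sketch of
# the observable behaviour (assumes transformers is installed):
import transformers

config = transformers.Speech2TextConfig()  # first access loads configuration_speech_to_text lazily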
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddpm(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
    def test_pipeline_pndm(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_dpm_multistep(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_euler(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_euler_ancestral(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options(self):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    def test_inference_k_lms(self):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
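# Outside the test harness the upscaler is driven the same way; a minimal CPU
# sketch using the checkpoint from the tests (image path is illustrative):
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
low_res = load_image("path/to/low_res.png").resize((128, 128))
upscaled = pipe(prompt="a photo of a cat", image=low_res, num_inference_steps=20).images[0]  # 512x512 output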
| 21 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    """Return ``x`` unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
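# Quick sanity check of the helper above (illustrative values, not taken from
# the tests): to_2tuple(224) == (224, 224), while an already-iterable input
# such as to_2tuple((224, 196)) is returned unchanged.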
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
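    # Worked example for the patch arithmetic above (illustrative sizes, not the
    # tiny test configs): a 224x224 image with 16x16 patches gives
    # (224 // 16) * (224 // 16) = 196 patches, hence seq_len = 197 once the
    # [CLS] token is prepended.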
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'Difference between torch and flax is {diff} (>= {tol}).')
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
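# The mixin above leaves get_vision_text_model, prepare_config_and_inputs and
# get_pretrained_model_and_inputs abstract; each concrete class below pairs one
# vision backbone with one text backbone and inherits every check unchanged.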
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-random-bert'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf', 'hf-internal-testing/tiny-random-roberta'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name='vision_model')
        text_model = TFRobertaModel(text_config, name='text_model')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf', 'hf-internal-testing/tiny-random-bert'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
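# Full determinism (seeded RNGs plus deterministic CUDA kernels) matters here
# because the fast tests below compare generated pixels against hard-coded
# slices.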
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
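    # dummy_extractor stands in for a real feature extractor: whatever it is
    # called with, it returns an object exposing empty pixel_values and a
    # chainable .to(device), which is all the pipeline needs in these tests.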
    def test_stable_diffusion_img2img_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_fp16(self):
        """Test that the img2img pipeline also runs with fp16 weights."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type='np',
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so comparing max absolute error here
        assert np.abs(expected_image - image).max() < 1e-2