code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1
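The schema above pairs a `code` snippet and a `style_context` snippet with integer style labels and a binary `label`; the rows below are the raw, identifier-mangled Python snippets. A minimal, hedged sketch of inspecting such rows follows; the Parquet file name is hypothetical and only the `datasets` API calls are real:

from datasets import load_dataset

# Assumed local export of the rows shown below; the file name is illustrative.
ds = load_dataset("parquet", data_files="code_style_pairs.parquet", split="train")
row = ds[0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])
print(row["code"][:200])  # a style-mangled Python snippet, as in the rows below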
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : Optional[int] ) ->None:
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
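# Added migration note (hedged): per the deprecation warning above, new code
# should construct PoolFormerImageProcessor directly, e.g.
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# (checkpoint name illustrative); this deprecated class only forwards its
# arguments to the image processor.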
| 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : int ):
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
self.m_edges.append([u_node, v_node, weight] )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase__ )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(UpperCAmelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase__ )
def _lowercase ( self : Any ):
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
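# Added usage sketch (hedged): the class above is Boruvka's minimum-spanning-tree
# algorithm with its identifiers mangled by the dataset. With the original method
# names reconstructed from the call sites (add_edge / find_component /
# set_component / union / boruvka), a run would look like:
#   g = Graph(3)
#   g.add_edge(0, 1, 5); g.add_edge(1, 2, 1); g.add_edge(0, 2, 3)
#   g.boruvka()  # adds edges (1, 2) and (0, 2) for a total MST weight of 4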
| 17 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = """philschmid/bart-large-cnn-samsum"""
a__ : str = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
a__ : Optional[int] = """summarizer"""
a__ : str = AutoTokenizer
a__ : Optional[int] = AutoModelForSeqaSeqLM
a__ : Optional[int] = ["""text"""]
a__ : Optional[int] = ["""text"""]
def _lowercase (self : Dict , __a : int ):
return self.pre_processor(__a , return_tensors="pt" , truncation=__a )
def _lowercase (self : Optional[Any] , __a : Optional[int] ):
return self.model.generate(**__a )[0]
def _lowercase (self : List[str] , __a : Any ):
return self.pre_processor.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
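# Added usage note (hedged): PipelineTool subclasses are callable, so an
# instance of the class above (its name is mangled in this dump) would be used
# roughly as
#   summarizer = SummarizerTool()          # hypothetical de-mangled class name
#   summary = summarizer("Long English text to condense ...")
# which tokenizes the input, runs generate(), and decodes the summary string.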
| 1 |
"""simple docstring"""
from math import sqrt
def _A ( UpperCamelCase_ : int) -> int:
'''simple docstring'''
__lowercase = 0
for i in range(1, int(sqrt(UpperCamelCase_) + 1)):
if n % i == 0 and i != sqrt(UpperCamelCase_):
total += i + n // i
elif i == sqrt(UpperCamelCase_):
total += i
return total - n
def _A ( UpperCamelCase_ : int = 10000) -> int:
'''simple docstring'''
__lowercase = sum(
i
for i in range(1, UpperCamelCase_)
if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
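# Added worked example (hedged; the names above are mangled, but the first
# function sums proper divisors and the second sums amicable numbers below a
# limit): the proper divisors of 220 sum to 284 and those of 284 sum to 220, so
# they form the classic amicable pair, and solution(300) would return
# 220 + 284 = 504.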
| 17 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase , cache_dir=UpperCamelCase )
lowercase__ = [t[-1] for t in os.walk(os.path.join(UpperCamelCase , os.listdir(UpperCamelCase )[0] , '''snapshots''' ) )]
lowercase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 4
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase ) == num_samples
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , )
lowercase__ = scheduler.create_state()
lowercase__ = scheduler_state
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase )
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , )
lowercase__ = replicate(UpperCamelCase )
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , use_memory_efficient_attention=UpperCamelCase , )
lowercase__ = replicate(UpperCamelCase )
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 2 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a = _symbol_database.Default()
_a = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
b'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a = None
_a = b'H\003'
# The following field options were generated by the protobuf compiler, but
# `_TRAINERSPEC` is not defined in this file, so they are kept commented out:
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a = 45
_a = 15_81
_a = 15_17
_a = 15_70
_a = 15_84
_a = 17_93
_a = 17_95
_a = 19_16
_a = 18_64
_a = 19_05
_a = 19_19
_a = 24_29
_a = 22_08
_a = 24_18
_a = 23_23
_a = 24_07
# @@protoc_insertion_point(module_scope)
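# Added usage note (hedged): after the two builder calls above, the generated
# message classes (ModelProto, TrainerSpec, NormalizerSpec, SelfTestData) live in
# this module's globals, so a SentencePiece model could be parsed with
#   m = ModelProto()
#   m.ParseFromString(open("spiece.model", "rb").read())  # file name illustrative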
| 17 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[str] = MobileBertConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A : Optional[Any] = MobileBertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
A : Optional[int] = load_tf_weights_in_mobilebert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
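# Added usage note (hedged): with the three required flags defined above, the
# script would be invoked roughly as
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --mobilebert_config_file config.json \
#       --pytorch_dump_path pytorch_model.bin
# (the script file name and the paths are assumptions; the flags are real).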
| 3 |
"""simple docstring"""
import baseaa
def _A ( UpperCamelCase_ : str) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8"))
def _A ( UpperCamelCase_ : bytes) -> str:
'''simple docstring'''
return baseaa.baadecode(UpperCamelCase_).decode("utf-8")
if __name__ == "__main__":
_a = 'Hello World!'
_a = baseaa_encode(test)
print(encoded)
_a = baseaa_decode(encoded)
print(decoded)
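# Added note (hedged): modulo the dataset's digit mangling ("baseaa" stands in
# for base64, baaencode/baadecode for b64encode/b64decode), this is a round-trip
# demo: base64-encoding 'Hello World!' yields b'SGVsbG8gV29ybGQh', and decoding
# that returns the original string.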
| 17 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = '''levit'''
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any]=2_2_4 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : int=[1_2_8, 2_5_6, 3_8_4] , UpperCAmelCase__ : str=[4, 8, 1_2] , UpperCAmelCase__ : Optional[int]=[4, 4, 4] , UpperCAmelCase__ : Optional[Any]=[1_6, 1_6, 1_6] , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=[2, 2, 2] , UpperCAmelCase__ : List[str]=[2, 2, 2] , UpperCAmelCase__ : Dict=0.02 , **UpperCAmelCase__ : Optional[int] , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = kernel_size
lowerCAmelCase = stride
lowerCAmelCase = padding
lowerCAmelCase = hidden_sizes
lowerCAmelCase = num_attention_heads
lowerCAmelCase = depths
lowerCAmelCase = key_dim
lowerCAmelCase = drop_path_rate
lowerCAmelCase = patch_size
lowerCAmelCase = attention_ratio
lowerCAmelCase = mlp_ratio
lowerCAmelCase = initializer_range
lowerCAmelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : int = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Dict ) -> float:
return 1E-4
| 4 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Any) -> List[str]:
'''simple docstring'''
__lowercase ,__lowercase = [], []
while len(UpperCamelCase_) > 1:
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
start.append(UpperCamelCase_)
end.append(UpperCamelCase_)
collection.remove(UpperCamelCase_)
collection.remove(UpperCamelCase_)
end.reverse()
return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
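# Added trace (hedged; this "merge sort" variant repeatedly peels off the min and
# max of the remaining items): for input 5,3,1,4,2 the loop yields start = [1, 2]
# and end = [5, 4], leaving [3]; after end.reverse() the result is
# [1, 2] + [3] + [4, 5] = [1, 2, 3, 4, 5].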
| 17 | 0 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to one from
# V or a vertex from V to one from U. In other words, for every edge (u, v),
# either u belongs to U and v to V, or u belongs to V and v to U. Equivalently,
# no edge connects two vertices of the same set.
def UpperCAmelCase_ ( __snake_case ) -> Any:
"""simple docstring"""
_lowercase =[False] * len(__snake_case )
_lowercase =[-1] * len(__snake_case )
def dfs(__snake_case , __snake_case ):
_lowercase =True
_lowercase =c
for u in graph[v]:
if not visited[u]:
dfs(__snake_case , 1 - c )
for i in range(len(__snake_case ) ):
if not visited[i]:
dfs(__snake_case , 0 )
for i in range(len(__snake_case ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
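# Added example (hedged): the adjacency list above is a 4-cycle (0-1-2-3) plus an
# isolated vertex, which is 2-colorable, so the check prints True; an odd cycle
# such as {0: [1, 2], 1: [0, 2], 2: [0, 1]} cannot be 2-colored and would return
# False.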
| 5 |
"""simple docstring"""
def _A ( UpperCamelCase_ : list[int]) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty")
__lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average
return sum(abs(x - average) for x in nums) / len(UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
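# Added worked example (hedged; the mangled function computes the mean absolute
# deviation): for [2, 4, 6, 8] the average is 5, the absolute deviations sum to
# 3 + 1 + 1 + 3 = 8, and 8 / 4 gives 2.0.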
| 17 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A : List[Any] = False
@skip_mps
class __A( a , a , a , unittest.TestCase ):
snake_case_ = StableDiffusionAttendAndExcitePipeline
snake_case_ = False
snake_case_ = TEXT_TO_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> List[str]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> int:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
__a = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__a = CLIPTextModel(_snake_case )
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> Any:
'''simple docstring'''
if str(_snake_case ).startswith('''mps''' ):
__a = torch.manual_seed(_snake_case )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__a = __a = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__a = self.get_dummy_inputs(_snake_case )
__a = pipe(**_snake_case ).images
__a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__a = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Dict:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = torch.manual_seed(51 )
__a = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=_snake_case , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
__a = '''a painting of an elephant with glasses'''
__a = [5, 7]
__a = pipe(
prompt=_snake_case , token_indices=_snake_case , guidance_scale=7.5 , generator=_snake_case , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 6 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def _A ( ) -> str:
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
| 17 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
lowercase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
lowercase_ = 0
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
lowercase_ = 4
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = 'left'
def __init__( self : Dict,lowercase_ : List[Any],lowercase_ : Dict=False,lowercase_ : List[str]=True,lowercase_ : Any=False,lowercase_ : Optional[int]="<s>",lowercase_ : List[str]="</s>",lowercase_ : List[str]="<unk>",lowercase_ : str="<sep>",lowercase_ : str="<pad>",lowercase_ : List[str]="<cls>",lowercase_ : Dict="<mask>",lowercase_ : Tuple=["<eop>", "<eod>"],lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : Optional[Any],)-> None:
'''simple docstring'''
A__ = AddedToken(lowercase_,lstrip=lowercase_,rstrip=lowercase_ ) if isinstance(lowercase_,lowercase_ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_,remove_space=lowercase_,keep_accents=lowercase_,bos_token=lowercase_,eos_token=lowercase_,unk_token=lowercase_,sep_token=lowercase_,pad_token=lowercase_,cls_token=lowercase_,mask_token=lowercase_,additional_special_tokens=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,)
A__ = 3
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def snake_case__ ( self : List[str] )-> Optional[Any]:
'''simple docstring'''
return len(self.sp_model )
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self : Optional[Any],lowercase_ : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self,'sp_model_kwargs' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : int,lowercase_ : List[Any] )-> List[Any]:
'''simple docstring'''
if self.remove_space:
A__ = ' '.join(inputs.strip().split() )
else:
A__ = inputs
A__ = outputs.replace('``','"' ).replace('\'\'','"' )
if not self.keep_accents:
A__ = unicodedata.normalize('NFKD',lowercase_ )
A__ = ''.join([c for c in outputs if not unicodedata.combining(lowercase_ )] )
if self.do_lower_case:
A__ = outputs.lower()
return outputs
def snake_case__ ( self : Optional[Any],lowercase_ : str )-> List[str]:
'''simple docstring'''
A__ = self.preprocess_text(lowercase_ )
A__ = self.sp_model.encode(lowercase_,out_type=lowercase_ )
A__ = []
for piece in pieces:
if len(lowercase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ = cur_pieces[1:]
else:
A__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase_ )
else:
new_pieces.append(lowercase_ )
return new_pieces
def snake_case__ ( self : Dict,lowercase_ : Any )-> Optional[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(lowercase_ )
def snake_case__ ( self : Optional[Any],lowercase_ : Any )-> Optional[int]:
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase_ )
def snake_case__ ( self : Optional[Any],lowercase_ : str )-> Union[str, Any]:
'''simple docstring'''
A__ = ''.join(lowercase_ ).replace(lowercase_,' ' ).strip()
return out_string
def snake_case__ ( self : Union[str, Any],lowercase_ : List[int],lowercase_ : bool = False,lowercase_ : bool = None,lowercase_ : bool = True,**lowercase_ : List[str],)-> str:
'''simple docstring'''
A__ = kwargs.pop('use_source_tokenizer',lowercase_ )
A__ = self.convert_ids_to_tokens(lowercase_,skip_special_tokens=lowercase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ = []
A__ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase_ ) )
A__ = []
sub_texts.append(lowercase_ )
else:
current_sub_text.append(lowercase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ = ''.join(lowercase_ )
A__ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ = self.clean_up_tokenization(lowercase_ )
return clean_text
else:
return text
def snake_case__ ( self : List[Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
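# Added note (hedged, grounded in the return values above): XLNet appends its
# special tokens at the END of the sequence, so a single input becomes
# tokens + ['<sep>', '<cls>'] and a pair becomes
# tokens_a + ['<sep>'] + tokens_b + ['<sep>', '<cls>'], unlike BERT's leading
# [CLS] token.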
def snake_case__ ( self : str,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ )
if token_ids_a is not None:
return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1, 1]
return ([0] * len(lowercase_ )) + [1, 1]
def snake_case__ ( self : List[str],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self : List[Any],lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_,'wb' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
| 7 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase ,lowercase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase = load_tool("text-classification" )
self.tool.setup()
__lowercase = load_tool("text-classification", remote=UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = self.tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : str ):
__lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : List[str] ):
__lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : Tuple ):
__lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
| 17 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
snake_case_ = 128
elif "12-12" in model_name:
snake_case_ = 12
snake_case_ = 12
elif "14-14" in model_name:
snake_case_ = 14
snake_case_ = 14
elif "16-16" in model_name:
snake_case_ = 16
snake_case_ = 16
else:
raise ValueError('''Model not supported''' )
snake_case_ = '''huggingface/label-files'''
if "speech-commands" in model_name:
snake_case_ = 35
snake_case_ = '''speech-commands-v2-id2label.json'''
else:
snake_case_ = 527
snake_case_ = '''audioset-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if "module.v" in name:
snake_case_ = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
snake_case_ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
snake_case_ = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
snake_case_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
snake_case_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
snake_case_ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
snake_case_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
snake_case_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
snake_case_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case_ = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
snake_case_ = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
snake_case_ = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
snake_case_ = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
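# Added worked example (hedged): tracing the substitutions above, the original
# checkpoint key
#   module.v.blocks.0.attn.proj.weight
# becomes
#   audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight
# ("module.v" -> model prefix, "blocks" -> "encoder.layer",
#  "attn.proj" -> "attention.output.dense").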
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
snake_case_ = key.split('''.''' )
snake_case_ = int(key_split[3] )
snake_case_ = config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = val
return orig_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original AST weights into the HF Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
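

# Example invocation (script filename and local paths are illustrative):
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted --push_to_hub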
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Pipeline that runs the same text-to-image prompt through the Stable Diffusion v1.1,
    v1.2, v1.3 and v1.4 checkpoints so the outputs can be compared side by side.
    """

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
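

# Minimal usage sketch (assumes this file is loadable as the `stable_diffusion_comparison`
# community pipeline; the prompt is illustrative):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse")  # runs v1.1-v1.4 and collects the results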
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image around its mean pixel value."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    # First pass: compute the (integer) mean intensity over all pixels.
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    # Second pass: map every pixel to 255 if above the mean, else 0.
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
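
# Worked example: for a 2x2 grayscale image with pixel values [10, 200, 30, 220],
# the mean is 460 // 4 = 115, so the thresholded output is [0, 255, 0, 255].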
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
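

# Note: each fast test above re-creates the pipeline and swaps in a different
# scheduler via `SchedulerClass.from_config(pipe.scheduler.config)`; the expected
# slices were recorded per scheduler, which is why they differ from test to test.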
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
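
# The hub cache stores each repo under models--<org>--<name>/ with three subfolders:
# blobs/ (content-addressed files), refs/ (branch name -> commit hash), and
# snapshots/<commit>/ (the resolved file tree). The tests below rely on this layout.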
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
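

# Minimal usage sketch (the file path is hypothetical); extra keyword arguments are
# routed into CsvConfig and from there into pandas.read_csv:
#   ds = datasets.load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";")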
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Threshold the logits into a binary mask and render it as a PIL image.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
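

# Minimal usage sketch (assumes a local image file; `photo.png` is a placeholder):
#   tool = ImageSegmentationTool()
#   mask = tool(Image.open("photo.png"), "cat")  # returns a black-and-white PIL mask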
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
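

# Minimal usage sketch via torch.hub (assumes this hubconf.py sits at the root of
# the GitHub repository being loaded; repo and checkpoint names are illustrative):
#   import torch
#   model = torch.hub.load("huggingface/transformers", "modelForCausalLM", "gpt2")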
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients from lowest degree up, at x.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using one multiply per coefficient.

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it; never called during conversion
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
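

# Example invocation (script filename and local paths are illustrative):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa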
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
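

# Minimal usage sketch:
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok("Crime and Punishment").input_ids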
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
{
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
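        # WordPiece is greedy longest-match-first: in-vocab pieces after the first are
        # emitted with a "##" prefix, and a word containing any unknown piece maps to "[UNK]".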
self.assertListEqual(tokenizer.tokenize("" ) ,[] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) ,["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) ,["[UNK]", "runn", "##ing"] )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCamelCase_ ( self : Optional[int] ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCamelCase_ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__A = self.rust_tokenizer_class.from_pretrained(A ,**A )
__A = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__A = tokenizer_r.encode_plus(
A ,return_attention_mask=A ,return_token_type_ids=A ,return_offsets_mapping=A ,add_special_tokens=A ,)
__A = tokenizer_r.do_lower_case if hasattr(A ,"do_lower_case" ) else False
__A = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["offset_mapping"] )
def UpperCamelCase_ ( self : Optional[int] ):
__A = ["的", "人", "有"]
__A = "".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__A = True
__A = self.tokenizer_class.from_pretrained(A ,**A )
__A = self.rust_tokenizer_class.from_pretrained(A ,**A )
__A = tokenizer_p.encode(A ,add_special_tokens=A )
__A = tokenizer_r.encode(A ,add_special_tokens=A )
__A = tokenizer_r.convert_ids_to_tokens(A )
__A = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
__A = False
__A = self.rust_tokenizer_class.from_pretrained(A ,**A )
__A = self.tokenizer_class.from_pretrained(A ,**A )
__A = tokenizer_r.encode(A ,add_special_tokens=A )
__A = tokenizer_p.encode(A ,add_special_tokens=A )
__A = tokenizer_r.convert_ids_to_tokens(A )
__A = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
__A = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
| 15 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "openai/whisper-base"
__UpperCAmelCase : Union[str, Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCAmelCase : List[str] = "transcriber"
__UpperCAmelCase : Optional[Any] = WhisperProcessor
__UpperCAmelCase : str = WhisperForConditionalGeneration
__UpperCAmelCase : List[str] = ["audio"]
__UpperCAmelCase : Tuple = ["text"]
    def encode(self, audio):
        # turn raw audio into log-mel input features for Whisper
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
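# Illustrative use of the tool (a sketch; the audio file name is an assumption,
# not part of this module):
#
#   tool = SpeechToTextTool()
#   transcript = tool("recording.wav")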
| 17 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __A ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # re-launches this very file under torchrun; the padding checks live in the __main__ block below
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = (accelerator.state.process_index + 2, 10)
lowerCAmelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
lowerCAmelCase_ = ''
lowerCAmelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1, falling back to lerp when they are nearly collinear."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost parallel: plain linear interpolation is numerically safer
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
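# Quick sanity check for slerp (illustrative only; these shapes and values are
# assumptions for the sketch, not part of the original pipeline):
#
#   v0, v1 = torch.randn(4), torch.randn(4)
#   mid = slerp(0.5, v0, v1)  # halfway along the arc between v0 and v1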
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
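# For unit vectors, ||x - y|| = 2 * sin(theta / 2) where theta is the angle between
# x and y, so the chained expression above evaluates to theta**2 / 2 per pair.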
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # freeze modules that are never trained during CLIP guidance
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
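    # e.g. num_inference_steps=50, strength=0.6 -> init_timestep=30, t_start=20:
    # denoising starts from the 20th scheduler step and runs the remaining 30 steps.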
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
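    # cond_fn is the CLIP-guidance core: decode the predicted x_0 through the VAE,
    # embed it with CLIP, measure spherical_dist_loss against the target image
    # embedding, and use the gradient of that loss to steer the noise prediction.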
    @torch.no_grad()
    def __call__( self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1, ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
            # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
| 17 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer batch size to pass to ParquetWriter, based on the feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
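# Row groups are the unit Parquet readers load at once; capping the number of rows
# per group for image / audio / binary columns keeps each row group at a manageable
# byte size even when individual cells are large.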
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as a Parquet file to an already-open binary file handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
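# Illustrative round trip (a sketch; the tiny in-memory dataset and file name are
# assumptions, not part of this module):
#
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()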
| 18 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase ):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 17 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prims_algorithm() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    # the eight MST edges (total weight 37) may come back in either orientation
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 19 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(SCREAMING_SNAKE_CASE__ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
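# _distribute_shards hands out contiguous ranges, e.g. 10 shards over 3 jobs become
# range(0, 4), range(4, 7), range(7, 10), so each worker reads a contiguous slice.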
| 20 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig( PretrainedConfig ):
"""simple docstring"""
__UpperCAmelCase : int = "time_series_transformer"
__UpperCAmelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_layers: int = 2, decoder_layers: int = 2, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
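    # Illustrative arithmetic (numbers chosen for the example, not defaults): with
    # embedding_dimension=[2], num_dynamic_real_features=0, num_time_features=2,
    # num_static_real_features=0 and input_size=1, this property is 2 + 0 + 2 + 0 + 2 = 6,
    # and feature_size adds input_size * len(lags_sequence) on top of it.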
| 17 | 0 |
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1."""
    # factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # pick one element per position, narrowing k with each factorial place value
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
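# Example: kth_permutation(2, 3) -> [1, 0, 2]; the lexicographic permutations of
# [0, 1, 2] are 012, 021, 102, 120, 201, 210, and index 2 (0-based) is 102.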
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_) -> None:
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
| 17 | 0 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Compute idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency."""
    return round(tf * idf, 3)
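

# Quick usage sketch for the helpers above; the corpus and the numbers are
# illustrative only (one document per line).
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")   # -> 1
    df, n = document_frequency("cat", corpus)   # -> (2, 3)
    idf = inverse_document_frequency(df, n)     # round(log10(3 / 2), 3) -> 0.176
    print(tf_idf(tf, idf))                      # -> 0.176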
| 22 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_a = 2
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict, *, # begin keyword-only arguments
UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ):
__lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(UpperCAmelCase__ )
__lowercase = self.add_symbol(UpperCAmelCase__ )
__lowercase = self.add_symbol(UpperCAmelCase__ )
__lowercase = self.add_symbol(UpperCAmelCase__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(UpperCAmelCase__ )
__lowercase = len(self.symbols )
def __eq__( self : List[str], UpperCAmelCase__ : Dict ):
return self.indices == other.indices
def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ):
return len(self.symbols )
def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ):
return sym in self.indices
@classmethod
def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ):
__lowercase = cls()
d.add_from_file(UpperCAmelCase__ )
return d
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ):
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(UpperCAmelCase__ )
self.count.append(UpperCAmelCase__ )
return idx
def _lowercase ( self : Any, UpperCAmelCase__ : str ):
return 0
def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
try:
with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd:
self.add_from_file(UpperCAmelCase__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(UpperCAmelCase__ )
for line in lines[indices_start_line:]:
try:
__lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase ,__lowercase = line.rsplit(" ", 1 )
else:
__lowercase = False
__lowercase = int(UpperCAmelCase__ )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(UpperCAmelCase__ ) )
self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def rewrite_dict_keys(d: dict) -> dict:
    # (1) remove the word-breaking symbol "@@" from word-internal pieces,
    # (2) add the word-ending symbol "</w>" where a word is not broken up
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
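

# Illustrative check: fairseq BPE entries mark word-internal pieces with a
# trailing "@@"; the rewrite strips that marker and tags word-final pieces with
# "</w>" instead, while the four special tokens keep their original spelling.
assert rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}) == {
    "hel": 4,
    "lo</w>": 5,
    "<s>": 0,
    "<pad>": 1,
    "</s>": 2,
    "<unk>": 3,
}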
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]:
'''simple docstring'''
if not os.path.exists(UpperCamelCase_):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_)
print(F"""Writing results to {pytorch_dump_folder_path}""")
# handle various types of models
__lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
__lowercase = torch.load(UpperCamelCase_, map_location="cpu")
__lowercase = chkpt["cfg"]["model"]
# dicts
__lowercase = os.path.join(UpperCamelCase_, "dict.txt")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {dict_file} does not exist!""")
__lowercase = Dictionary.load(UpperCamelCase_)
__lowercase = rewrite_dict_keys(src_dict.indices)
__lowercase = len(UpperCamelCase_)
__lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"])
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# merges_file (bpecodes)
__lowercase = os.path.join(UpperCamelCase_, "bpecodes")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")
__lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"])
shutil.copyfile(UpperCamelCase_, UpperCamelCase_)
# model config
__lowercase = os.path.join(UpperCamelCase_, "config.json")
__lowercase = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# tokenizer config
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
__lowercase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"""Generating {biogpt_tokenizer_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# model
__lowercase = chkpt["model"]
# remove unneeded keys
__lowercase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase_, UpperCamelCase_)
__lowercase = list(model_state_dict.keys())
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight"):
__lowercase = model_state_dict.pop(UpperCamelCase_)
else:
__lowercase = model_state_dict.pop(UpperCamelCase_)
__lowercase = BioGptConfig.from_pretrained(UpperCamelCase_)
__lowercase = BioGptForCausalLM(UpperCamelCase_)
# check that it loads ok
model_new.load_state_dict(UpperCamelCase_)
# save
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
print(F"""Generating {pytorch_weights_dump_path}""")
torch.save(UpperCamelCase_, UpperCamelCase_)
print("Conversion is done!")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
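
# Hedged programmatic equivalent of the CLI above; the paths are hypothetical
# and the entry-point name mirrors the call in the __main__ block:
#
#   convert_biogpt_checkpoint_to_pytorch(
#       "/path/to/fairseq/biogpt",   # directory with checkpoint.pt, dict.txt, bpecodes
#       "/path/to/pytorch_dump",
#   )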
| 17 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_MASKED_LM_MAPPING
def A ( self : List[str] ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
UpperCAmelCase : str = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
UpperCAmelCase : Optional[Any] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
UpperCAmelCase : Any = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
UpperCAmelCase : List[Any] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase : int = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
UpperCAmelCase : int = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
UpperCAmelCase : List[Any] = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_torch
def A ( self : Optional[Any] ) -> int:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(__snake_case )
@slow
@require_tf
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(__snake_case )
def A ( self : Dict , __snake_case : Any ) -> str:
UpperCAmelCase : List[str] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
UpperCAmelCase : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
UpperCAmelCase : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
@require_tf
def A ( self : Dict ) -> List[Any]:
UpperCAmelCase : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
def A ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] ) -> Union[str, Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : Optional[Any] = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def A ( self : List[str] , __snake_case : Any , __snake_case : str ) -> str:
UpperCAmelCase : int = fill_masker.tokenizer
UpperCAmelCase : Tuple = fill_masker.model
UpperCAmelCase : Optional[Any] = fill_masker(
F"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : int = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : Union[str, Any] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
with self.assertRaises(__snake_case ):
fill_masker([None] )
        # Input without a mask token is not supported
with self.assertRaises(__snake_case ):
fill_masker('''This is''' )
self.run_test_top_k(__snake_case , __snake_case )
self.run_test_targets(__snake_case , __snake_case )
self.run_test_top_k_targets(__snake_case , __snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case )
self.fill_mask_with_multiple_masks(__snake_case , __snake_case )
def A ( self : Any , __snake_case : Any , __snake_case : List[Any] ) -> List[Any]:
UpperCAmelCase : Tuple = tokenizer.get_vocab()
UpperCAmelCase : List[str] = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase : Any = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , targets=__snake_case )
UpperCAmelCase : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : Any = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Call argument
UpperCAmelCase : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : Tuple = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
UpperCAmelCase : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Score equivalence
UpperCAmelCase : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case )
UpperCAmelCase : int = [top_mask['''token_str'''] for top_mask in outputs]
UpperCAmelCase : Optional[int] = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ) == set(__snake_case ):
UpperCAmelCase : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case )
UpperCAmelCase : str = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
# Raises with invalid
with self.assertRaises(__snake_case ):
UpperCAmelCase : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__snake_case ):
UpperCAmelCase : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''''''] )
with self.assertRaises(__snake_case ):
UpperCAmelCase : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='''''' )
def A ( self : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , top_k=2 )
UpperCAmelCase : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def A ( self : Dict , __snake_case : List[Any] , __snake_case : Tuple ) -> int:
UpperCAmelCase : List[str] = tokenizer.get_vocab()
UpperCAmelCase : Optional[int] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
# top_k=2, ntargets=3
UpperCAmelCase : Union[str, Any] = sorted(vocab.keys() )[:3]
UpperCAmelCase : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__snake_case )
        # If we use the most probable targets and filter differently, we should
        # still get the same results
        UpperCAmelCase : Union[str, Any] = [el['''token_str'''] for el in sorted(__snake_case , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ).issubset(__snake_case ):
UpperCAmelCase : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def A ( self : Tuple , __snake_case : Dict , __snake_case : Dict ) -> Union[str, Any]:
UpperCAmelCase : List[str] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : str = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase : Optional[int] = sorted(vocab.keys() )[:3]
UpperCAmelCase : Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase : List[Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=__snake_case , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more results than there are unique targets
self.assertEqual(len(__snake_case ) , 3 )
def A ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : Optional[Any] = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
| 23 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : int ):
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
self.m_edges.append([u_node, v_node, weight] )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase__ )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(UpperCAmelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase__ )
def _lowercase ( self : Any ):
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
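

# Sanity check: the unique triplet for this problem is (200, 375, 425), so
# solution() evaluates to 200 * 375 * 425 == 31_875_000.
assert 200 + 375 + 425 == 1_000 and 200**2 + 375**2 == 425**2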
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of `n` (all divisors except `n`)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below `limit` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
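

# Sanity check: 220 and 284 form the classic amicable pair, so both satisfy
# the predicate used in solution() above.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220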
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 17 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
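
# `get_duration` and `generate_example_dataset` come from a local `utils`
# module that is not shown here; below is a minimal sketch of what such a
# timing decorator could look like (name suffixed to avoid shadowing the
# import; behavior is an assumption):
import functools
import timeit


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # seconds, suitable for `times`
    return wrapper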
| 25 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
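
# Behavior sketch: with `_LazyModule`, importing this package is cheap and the
# torch-backed classes above are only materialized on first attribute access
# (assuming this file is the package's `__init__`, as in mainline transformers):
#
#   from transformers.models import table_transformer
#   config = table_transformer.TableTransformerConfig()       # light import
#   model = table_transformer.TableTransformerModel(config)   # triggers torch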
| 26 |
"""simple docstring"""
import base64


# NOTE: the digits in the original module/function names were obfuscated in
# this dump ("baseaa"); base64 is assumed here as the intended variant.
def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
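    # Round-trip sanity check; the expected literal holds under the base64
    # assumption noted above ("Hello World!" -> b"SGVsbG8gV29ybGQh").
    assert encoded == b"SGVsbG8gV29ybGQh"
    assert decoded == test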
| 17 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(r'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
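

# A hedged usage sketch (not part of the original file): it assumes that
# PipelineTool's __call__ chains encode/forward/decode as in mainline
# transformers, and that a local "invoice.png" exists.
if __name__ == "__main__":
    from PIL import Image

    tool = DocumentQuestionAnsweringTool()
    document = Image.open("invoice.png")  # hypothetical input image
    print(tool(document, "What is the invoice number?"))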
| 27 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort by repeatedly moving the current minimum to a front list and the
    current maximum to a back list, then stitching the pieces together."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
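

# Example: each pass peels off the current minimum and maximum, consuming the
# list from both ends; an odd-length input leaves its median in the middle.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]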
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 17 | 0 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 28 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the mean of the absolute deviations from the arithmetic mean."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
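

# Worked example: the mean of [1, 2, 3, 4] is 2.5, the absolute deviations are
# 1.5, 0.5, 0.5 and 1.5, and their average is 1.0.
assert average_absolute_deviation([1, 2, 3, 4]) == 1.0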
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '▁'
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCAmelCase = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
__UpperCAmelCase = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
__UpperCAmelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_snake_case : List[int] = []
_snake_case : List[int] = []
def __init__( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase_ : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
UpperCAmelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ : str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
UpperCAmelCase_ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : Optional[int] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Union[str, Any] = len(self.sp_model )
UpperCAmelCase_ : Optional[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCamelCase )
}
UpperCAmelCase_ : int = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase_ : Tuple = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCAmelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : str = None
return state
def __setstate__( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : int = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[Any] = ''
UpperCAmelCase_ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = []
else:
current_sub_tokens.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
UpperCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
                token_ids_0=_UpperCamelCase , token_ids_1=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : List[str] = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : Any = self(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.convert_tokens_to_ids(_UpperCamelCase )
UpperCAmelCase_ : Any = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ) -> BatchEncoding:
UpperCAmelCase_ : Optional[int] = src_lang
UpperCAmelCase_ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : str = self.lang_code_to_id[src_lang]
UpperCAmelCase_ : Union[str, Any] = [self.cur_lang_code_id]
UpperCAmelCase_ : Union[str, Any] = [self.eos_token_id]
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : int = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ : str = [self.cur_lang_code_id]
UpperCAmelCase_ : List[str] = [self.eos_token_id]
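# A minimal sketch of the fairseq/SentencePiece id alignment implemented by the
# tokenizer above (toy helper and numbers, not part of the original file): SPM
# piece ids are shifted by `fairseq_offset` (1), and SPM piece 0 ('<unk>') falls
# back to the fixed fairseq id 3 from the alignment table in `__init__`.
def _toy_spm_to_fairseq_id(spm_piece_id: int, fairseq_offset: int = 1, unk_id: int = 3) -> int:
    return spm_piece_id + fairseq_offset if spm_piece_id else unk_id

assert _toy_spm_to_fairseq_id(5) == 6  # ordinary piece: shifted by the offset
assert _toy_spm_to_fairseq_id(0) == 3  # SPM '<unk>' maps to fairseq '<unk>'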
| 29 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class _lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img():
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
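# Minimal sketch of the jit-vs-eager equivalence pattern used in the tests above
# (toy function, illustrative only; not part of the original test file):
import jax
import jax.numpy as jnp

def _toy_forward(x):
    return (x * 2.0).sum()

_jitted = jax.jit(_toy_forward)(jnp.ones((2, 3)))
with jax.disable_jit():
    _eager = _toy_forward(jnp.ones((2, 3)))
assert _jitted.shape == _eager.shape  # mirrors the "JIT Enabled"/"JIT Disabled" subtests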
| 17 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
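# Note (added for clarity): the layer above is squeeze-and-excitation. The global
# average pool yields a (batch, 1, 1, channels) descriptor, two 1x1 convolutions
# (ReLU then sigmoid) turn it into per-channel gates in [0, 1], and the input
# feature map is rescaled channel-wise by those gates.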
class TFRegNetXLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
"""simple docstring"""
    config_class = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
# Change to NCHW output format have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class TFRegNetModel(TFRegNetPreTrainedModel):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
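# Minimal sketch of the NCHW <-> NHWC transposes performed by the embedder and
# main layer above (illustrative shapes, not part of the original file):
import tensorflow as tf

_x_nchw = tf.zeros((1, 3, 224, 224))                # (batch, channels, height, width)
_x_nhwc = tf.transpose(_x_nchw, perm=(0, 2, 3, 1))  # -> (1, 224, 224, 3), as Conv2D expects on CPU
_x_back = tf.transpose(_x_nhwc, perm=(0, 3, 1, 2))  # -> (1, 3, 224, 224), restored for output uniformity
assert _x_back.shape == _x_nchw.shape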
| 30 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase = load_tool("text-classification" )
self.tool.setup()
__lowercase = load_tool("text-classification", remote=UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = self.tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : str ):
__lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : List[str] ):
__lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : Tuple ):
__lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
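# Minimal sketch of the call pattern exercised above (labels illustrative; the
# positional and keyword forms resolve to the same classification):
#     tool = load_tool("text-classification")
#     tool.setup()
#     tool("That's quite cool", ["positive", "negative"])               # -> "positive"
#     tool(text="That's quite cool", labels=["positive", "negative"])  # -> "positive"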
| 17 | 0 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=False ) -> str:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
_UpperCAmelCase : Optional[int] = os.path.abspath(_UpperCAmelCase )
logger.info(F"""Loading PyTorch weights from {pt_path}""" )
_UpperCAmelCase : Tuple = torch.load(_UpperCAmelCase , map_location="cpu" )
logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
_UpperCAmelCase : Tuple = convert_pytorch_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_UpperCAmelCase : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase )
return flax_state_dict
def rename_key_and_reshape_tensor( _UpperCAmelCase : Tuple[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, jnp.ndarray] , _UpperCAmelCase : str , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_UpperCAmelCase : Tuple[str] ) -> bool:
return len(set(_UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_UpperCAmelCase : int = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_UpperCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
_UpperCAmelCase : int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
_UpperCAmelCase : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_UpperCAmelCase : Any = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_UpperCAmelCase : List[str] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_UpperCAmelCase : List[str] = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_UpperCAmelCase : int = pt_tuple_key[-2] + "_v"
if name is not None:
_UpperCAmelCase : Tuple = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_UpperCAmelCase : str = flax_model.params["params"]
else:
_UpperCAmelCase : Union[str, Any] = flax_model.params
_UpperCAmelCase : List[Any] = flatten_dict(_UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCAmelCase : Dict = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCAmelCase )
_UpperCAmelCase : str = {}
_UpperCAmelCase : List[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
_UpperCAmelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCAmelCase : int = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
_UpperCAmelCase : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCAmelCase : Tuple = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCAmelCase , _UpperCAmelCase : List[Any] = rename_key_and_reshape_tensor(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# add model prefix if necessary
_UpperCAmelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCAmelCase : Union[str, Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_UpperCAmelCase : List[str] = jnp.asarray(_UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
_UpperCAmelCase : Union[str, Any] = jnp.asarray(_UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
_UpperCAmelCase : Optional[int] = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
def convert_pytorch_sharded_state_dict_to_flax( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import torch
# Load the index
_UpperCAmelCase : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
_UpperCAmelCase : List[str] = torch.load(_UpperCAmelCase )
_UpperCAmelCase : int = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCAmelCase : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCAmelCase : Optional[Any] = flax_model.params["params"]
_UpperCAmelCase : List[Any] = flatten_dict(_UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
_UpperCAmelCase : List[Any] = flax_model.params
_UpperCAmelCase : List[str] = flatten_dict(_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
_UpperCAmelCase : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCAmelCase : int = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
_UpperCAmelCase : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCAmelCase : str = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCAmelCase , _UpperCAmelCase : Tuple = rename_key_and_reshape_tensor(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# add model prefix if necessary
_UpperCAmelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCAmelCase : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_UpperCAmelCase : Union[str, Any] = jnp.asarray(_UpperCAmelCase )
continue
if "var" in flax_key[-1]:
_UpperCAmelCase : str = jnp.asarray(_UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
_UpperCAmelCase : List[str] = jnp.asarray(_UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
_UpperCAmelCase : Any = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
def load_flax_checkpoint_in_pytorch_model( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = os.path.abspath(_UpperCAmelCase )
logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
_UpperCAmelCase : List[str] = getattr(_UpperCAmelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCAmelCase , "rb" ) as state_f:
try:
_UpperCAmelCase : Dict = from_bytes(_UpperCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(_UpperCAmelCase , _UpperCAmelCase )
def load_flax_weights_in_pytorch_model( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
_UpperCAmelCase : List[str] = flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa , _UpperCAmelCase ) ).values()
if any(_UpperCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
_UpperCAmelCase : Optional[int] = jax.tree_util.tree_map(
lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCAmelCase )
_UpperCAmelCase : Tuple = flatten_dict(_UpperCAmelCase )
_UpperCAmelCase : List[str] = pt_model.state_dict()
_UpperCAmelCase : Tuple = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
_UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : str = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_UpperCAmelCase : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix
_UpperCAmelCase : Optional[Any] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCAmelCase : str = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCAmelCase : int = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCAmelCase ) not in pt_model_dict:
# conv layer
_UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
_UpperCAmelCase : Optional[Any] = jnp.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ) not in pt_model_dict:
# linear layer
_UpperCAmelCase : List[Any] = flax_key_tuple[:-1] + ("weight",)
_UpperCAmelCase : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_UpperCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
_UpperCAmelCase : List[str] = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
_UpperCAmelCase : Union[str, Any] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_UpperCAmelCase : Optional[Any] = ".".join(_UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_UpperCAmelCase : Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_UpperCAmelCase : Optional[int] = key.split("." )
_UpperCAmelCase : Tuple = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_UpperCAmelCase : Union[str, Any] = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
_UpperCAmelCase : int = key_components[-2] + "_v"
if name is not None:
_UpperCAmelCase : List[str] = key_components[:-3] + [name]
_UpperCAmelCase : Optional[Any] = ".".join(_UpperCAmelCase )
_UpperCAmelCase : Any = key
if flax_key in special_pt_names:
_UpperCAmelCase : Union[str, Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
_UpperCAmelCase : Optional[Any] = np.asarray(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , np.ndarray ) else flax_tensor
_UpperCAmelCase : int = torch.from_numpy(_UpperCAmelCase )
# remove from missing keys
missing_keys.remove(_UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCAmelCase )
pt_model.load_state_dict(_UpperCAmelCase )
# re-transform missing_keys to list
_UpperCAmelCase : Optional[int] = list(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(_UpperCAmelCase ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"If your task is similar to the task the model of the checkpoint was trained on, "
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
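# Minimal sketch of the kernel reshapes applied during conversion above
# (illustrative shapes, not part of the original file):
import numpy as np

_pt_linear = np.zeros((8, 4))                # PyTorch Linear weight: (out_features, in_features)
_flax_linear = _pt_linear.T                  # Flax Dense kernel:     (in_features, out_features)
_pt_conv = np.zeros((16, 3, 5, 5))           # PyTorch Conv2d weight: (out_ch, in_ch, kh, kw)
_flax_conv = _pt_conv.transpose(2, 3, 1, 0)  # Flax Conv kernel:      (kh, kw, in_ch, out_ch)
assert _flax_linear.shape == (4, 8)
assert _flax_conv.shape == (5, 5, 3, 16)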
| 31 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ):
        super().__init__()
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def _lowercase ( self : List[str] ):
return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
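# A minimal usage sketch for the comparison pipeline above. It assumes the class is
# exposed as the `stable_diffusion_comparison` community pipeline; the checkpoint id,
# prompt, and output handling below are illustrative, not part of this file.
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#     for i, image in enumerate(output.images):
#         image.save(f"comparison_{i}.png")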
| 17 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
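# A minimal forward-pass sketch mirroring the integration test above; the token ids
# and bounding boxes are dummy values, and the checkpoint is the one used in the test.
#
#     import torch
#     from transformers import LiltModel
#
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     input_ids = torch.tensor([[1, 2]])
#     bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
#     with torch.no_grad():
#         last_hidden_state = model(input_ids=input_ids, bbox=bbox).last_hidden_state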
| 32 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
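# A minimal CPU usage sketch for the upscale pipeline exercised above; the input
# image URL and prompt are illustrative.
#
#     from diffusers import OnnxStableDiffusionUpscalePipeline
#     from diffusers.utils import load_image
#
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#     )
#     low_res = load_image("https://example.com/low_res.png").resize((128, 128))  # hypothetical URL
#     upscaled = pipe(prompt="a photo of a cat", image=low_res).images[0]  # 512x512 output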
| 17 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
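# A minimal usage sketch for the image processor above, on a random RGB array;
# the class alias below is simply the (obfuscated) name used in this file.
#
#     import numpy as np
#
#     processor = _UpperCAmelCase()
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)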
| 33 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
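# A minimal usage sketch for the builder above: `load_dataset` routes the "csv"
# packaged module to this builder; the file name and column access are illustrative.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=",")
#     print(dataset["train"][0])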
| 17 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
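# For instance (illustrative input), a pytest stats line parses as follows: the ints
# preceding "failed"/"passed" are summed and the token before the trailing "==" is the
# time spent.
#
#     failed, success, time_spent = handle_test_results("== 2 failed, 3 passed in 12.34s ==")
#     assert (failed, success, time_spent) == (2, 3, "12.34s")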
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 34 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0], b"This is the first record")
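# A minimal sketch of the retriever call exercised above, given `block_records` and
# `tokenizer` built as in the fixtures; the query and block ids are dummy values.
#
#     retriever = RealmRetriever(block_records=block_records, tokenizer=tokenizer)
#     has_answers, start_pos, end_pos, concat_inputs = retriever(
#         np.array([0], dtype="long"),
#         tokenizer(["a question"]).input_ids,
#         answer_ids=None,
#         max_length=32,
#         return_tensors="np",
#     )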
| 35 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as low-to-high coefficients, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one pass, no powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
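# Worked check (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 the
# value is 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500.0 + 9300.0 + 70000.0 = 79800.0, and
# the two implementations agree up to floating-point rounding:
#
#     from math import isclose
#     assert isclose(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0),
#                    horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0))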
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = "sgugger/tiny-distilbert-classification"
_lowerCAmelCase : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, only_pretrain_model=__a, )
_lowerCAmelCase : Tuple = TensorFlowBenchmark(__a)
_lowerCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : List[Any] = TensorFlowBenchmark(__a)
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], eager_mode=__a, multi_process=__a, )
_lowerCAmelCase : Any = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : int = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Any = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : List[Any] = TensorFlowBenchmark(__a)
_lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : str = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : List[str] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(__a, configs=[config])
_lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = "sshleifer/tiny-gpt2"
_lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], use_xla=__a, multi_process=__a, )
_lowerCAmelCase : Tuple = TensorFlowBenchmark(__a)
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__a, save_to_csv=__a, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(__a, "inf_time.csv"), inference_memory_csv_file=os.path.join(__a, "inf_mem.csv"), env_info_csv_file=os.path.join(__a, "env.csv"), multi_process=__a, )
_lowerCAmelCase : List[str] = TensorFlowBenchmark(__a)
benchmark.run()
self.assertTrue(Path(os.path.join(__a, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__a, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__a, "env.csv")).exists())
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__a):
self.assertTrue(hasattr(__a, "sequential"))
self.assertTrue(hasattr(__a, "cumulative"))
self.assertTrue(hasattr(__a, "current"))
self.assertTrue(hasattr(__a, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__a, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(__a, "log.txt"), log_print=__a, trace_memory_line_by_line=__a, eager_mode=__a, multi_process=__a, )
_lowerCAmelCase : List[Any] = TensorFlowBenchmark(__a)
_lowerCAmelCase : Tuple = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(__a, "log.txt")).exists())
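# --- Added sketch (not in the original): a standalone benchmark run ------------
# Hedged minimal invocation outside the test harness, using only arguments and
# attributes exercised in the tests above; the model id, sizes, and helper name
# are illustrative.
def _demo_benchmark():
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(args).run()
    return results.time_inference_result, results.memory_inference_result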
| 36 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Thin wrapper whose only purpose is to load the Lightning checkpoint's state dict."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
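# --- Added sketch (not in the original): invoking the converter ----------------
# A hedged command line matching the argparse definition above; the script name
# and both paths are illustrative placeholders.
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa-pytorch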
| 17 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class holding (optionally learned) embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable, hidden_size=None, length=None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> Union[str, Any]:
super().__init__()
self.register_modules(
vqvae=__UpperCAmelCase ,transformer=__UpperCAmelCase ,text_encoder=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,scheduler=__UpperCAmelCase ,learned_classifier_free_sampling_embeddings=__UpperCAmelCase ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Optional[int] = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else 1
# get prompt text embeddings
lowerCAmelCase__ : int = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase__ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase__ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase__ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowerCAmelCase__ : List[str] = prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=__UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
lowerCAmelCase__ : Any = prompt_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowerCAmelCase__ : Dict = self.learned_classifier_free_sampling_embeddings.embeddings
lowerCAmelCase__ : Optional[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase ,1 ,1 )
else:
lowerCAmelCase__ : Optional[Any] = [""""""] * batch_size
lowerCAmelCase__ : int = text_input_ids.shape[-1]
lowerCAmelCase__ : str = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,max_length=__UpperCAmelCase ,truncation=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowerCAmelCase__ : Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
lowerCAmelCase__ : int = negative_prompt_embeds.repeat(1 ,__UpperCAmelCase ,1 )
lowerCAmelCase__ : Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,__UpperCAmelCase ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase = 100 ,__UpperCAmelCase = 5.0 ,__UpperCAmelCase = 1.0 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = "pil" ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = 1 ,) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = 1
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : int = len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
lowerCAmelCase__ : Optional[Any] = batch_size * num_images_per_prompt
lowerCAmelCase__ : Union[str, Any] = guidance_scale > 1.0
lowerCAmelCase__ : Any = self._encode_prompt(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__UpperCAmelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
lowerCAmelCase__ : Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowerCAmelCase__ : List[Any] = self.transformer.num_vector_embeds - 1
lowerCAmelCase__ : int = torch.full(__UpperCAmelCase ,__UpperCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
lowerCAmelCase__ : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase ,device=self.device )
lowerCAmelCase__ : Any = self.scheduler.timesteps.to(self.device )
lowerCAmelCase__ : int = latents
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
lowerCAmelCase__ : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowerCAmelCase__ : List[str] = self.transformer(__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,timestep=__UpperCAmelCase ).sample
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model_output.chunk(2 )
lowerCAmelCase__ : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCAmelCase ,dim=1 ,keepdim=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = self.truncate(__UpperCAmelCase ,__UpperCAmelCase )
# remove `log(0)`'s (`-inf`s)
lowerCAmelCase__ : Optional[Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : List[str] = self.scheduler.step(__UpperCAmelCase ,timestep=__UpperCAmelCase ,sample=__UpperCAmelCase ,generator=__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : Tuple = self.vqvae.config.vq_embed_dim
lowerCAmelCase__ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowerCAmelCase__ : List[str] = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase ,shape=__UpperCAmelCase )
lowerCAmelCase__ : int = self.vqvae.decode(__UpperCAmelCase ,force_not_quantize=__UpperCAmelCase ).sample
lowerCAmelCase__ : Dict = (image / 2 + 0.5).clamp(0 ,1 )
lowerCAmelCase__ : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : Dict = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> torch.FloatTensor:
lowerCAmelCase__ , lowerCAmelCase__ : Any = torch.sort(__UpperCAmelCase ,1 ,descending=__UpperCAmelCase )
lowerCAmelCase__ : Any = torch.exp(__UpperCAmelCase )
lowerCAmelCase__ : Dict = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowerCAmelCase__ : Dict = torch.full_like(keep_mask[:, 0:1, :] ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = torch.cat((all_true, keep_mask) ,dim=1 )
lowerCAmelCase__ : str = keep_mask[:, :-1, :]
lowerCAmelCase__ : Optional[Any] = keep_mask.gather(1 ,indices.argsort(1 ) )
lowerCAmelCase__ : List[Any] = log_p_x_0.clone()
lowerCAmelCase__ : Optional[Any] = -torch.inf # -inf = log(0)
return rv
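# --- Added sketch (not in the original): the truncation step in isolation ------
# Hedged, minimal re-implementation of the ``truncate`` method above on a toy
# log-probability tensor of shape (batch, classes, positions): per position,
# keep the most probable classes until their cumulative mass reaches
# ``truncation_rate``, and send every other class to log(0) = -inf. The helper
# name is illustrative.
def _demo_truncate(log_p_x_0, truncation_rate=0.9):
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # ensure at least the single most probable class survives per position
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    keep_mask = keep_mask.gather(1, indices.argsort(1))  # unsort back to class order
    rv = log_p_x_0.clone()
    rv[~keep_mask] = float("-inf")
    return rv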
| 37 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, )
assert hasattr(self, "env" )
def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ):
# configuration for running training on smdistributed Model Parallel
__lowercase = {
"enabled": True,
"processes_per_host": 8,
}
__lowercase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_0_0,
}, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ):
# create estimator
__lowercase = self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
| 17 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
def rename_key(key):
    """Rename nested module keys like ``layers.0`` to ``layers_0`` for Flax."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: (out, in, kH, kW) -> (kH, kW, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: (out, in) -> (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Port a PyTorch state dict onto a randomly initialized Flax model."""
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        pt_tuple_key, pt_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if pt_tuple_key in random_flax_state_dict:
            if pt_tensor.shape != random_flax_state_dict[pt_tuple_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[pt_tuple_key].shape}, but is {pt_tensor.shape}.""")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[pt_tuple_key] = jnp.asarray(pt_tensor)
    return unflatten_dict(flax_state_dict)
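# --- Added sketch (not in the original): why the linear-weight transpose -------
# Hedged numeric check of the convention handled above: a PyTorch ``nn.Linear``
# stores its weight as (out_features, in_features), while a Flax ``Dense``
# kernel is (in_features, out_features), hence the ``pt_tensor.T`` branch. The
# helper name is illustrative.
def _demo_linear_transpose():
    pt_weight = jnp.arange(6, dtype=jnp.float32).reshape(2, 3)  # PyTorch layout: (out, in)
    flax_kernel = pt_weight.T                                   # Flax Dense layout: (in, out)
    x = jnp.ones(3, dtype=jnp.float32)
    assert jnp.allclose(pt_weight @ x, x @ flax_kernel)         # same linear map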
| 38 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    """Tool wrapping Whisper for audio transcription."""

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
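# --- Added sketch (not in the original): using the tool -------------------------
# Hedged example; ``PipelineTool`` instances are callable and chain the
# encode -> forward -> decode stages defined above (an assumption about the base
# class, which is not shown in this file). The audio path is illustrative.
#
#   tool = SpeechToTextTool()
#   print(tool("path/to/recording.flac"))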
| 17 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    """XGLM tokenizer backed by SentencePiece, with fairseq-style id alignment."""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase ))
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase ))
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
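# --- Added sketch (not in the original): the fairseq id offset in isolation -----
# Hedged toy version of the alignment documented in __init__ above: real
# sentencepiece ids are shifted up by ``fairseq_offset`` (1) so that the four
# control tokens can keep ids 0-3, and spm id 0 (spm's <unk>) falls back to the
# fairseq <unk> id. The helper name is illustrative.
def _demo_fairseq_alignment(spm_id, fairseq_offset=1):
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]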
| 39 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Resize and normalize `image` into a (B, C, H, W) tensor in [-1, 1]."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical interpolation between `v0` and `v1` at fraction `t`."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # nearly colinear vectors: fall back to linear interpolation
        va = (1 - t) * v0 + t * v1
    else:
        theta_a = np.arccos(dot)
        sin_theta_a = np.sin(theta_a)
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t)
        sa = np.sin(theta_a - theta_t) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        va = sa * v0 + sb * v1
    if inputs_are_torch:
        va = torch.from_numpy(va).to(input_device)
    return va
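# --- Added sketch (not in the original): slerp endpoint checks ------------------
# Hedged sanity check for the interpolation above: at t=0 it returns the first
# vector, at t=1 the second, and intermediate t stays on the great-circle arc.
# The helper name is illustrative.
def _demo_slerp_endpoints():
    va = np.array([1.0, 0.0])
    vb = np.array([0.0, 1.0])
    assert np.allclose(slerp(0.0, va, vb), va)
    assert np.allclose(slerp(1.0, va, vb), vb)
    mid = slerp(0.5, va, vb)
    assert np.allclose(np.linalg.norm(mid), 1.0)  # stays on the unit circle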
def spherical_dist_loss(x, y):
    """Squared great-circle distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
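# --- Added sketch (not in the original): driving the pipeline -------------------
# Hedged usage outline for the class above. The component keyword names come
# from the register_modules call in __init__; keyword names for __call__ beyond
# the two leading image arguments are not shown because the original parameter
# names were lost in this dump, and all values here are illustrative.
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor,
#   ).to("cuda")
#   out = pipe(content_image, style_image)  # remaining arguments keep their defaults
#   out.images[0].save("mixed.png")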
| 17 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def __snake_case ( self : Tuple):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self : List[str]):
a : Optional[int] = ort.SessionOptions()
a : str = False
return options
def __snake_case ( self : Optional[int]):
a : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
a : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
a : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
a : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a : int = "A red cat sitting on a park bench"
a : Optional[Any] = np.random.RandomState(0)
a : Optional[Any] = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__UpperCAmelCase , output_type="np" , )
a : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-2
| 40 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
"""simple docstring"""
__UpperCAmelCase : Tuple = XGLMConfig
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Union[str, Any] = "gelu"
def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = d_model
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = ffn_dim
__lowercase = activation_function
__lowercase = activation_dropout
__lowercase = attention_dropout
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = None
__lowercase = 0
__lowercase = 2
__lowercase = 1
def _lowercase ( self : Union[str, Any] ):
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def _lowercase ( self : Tuple ):
__lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = self.get_config()
__lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self : List[Any] ):
return XGLMConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, )
def _lowercase ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) = config_and_inputs
__lowercase = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCAmelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def _lowercase ( self : Optional[Any] ):
__lowercase = TFXGLMModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 )
def _lowercase ( self : Any ):
self.config_tester.run_common_tests()
@slow
def _lowercase ( self : List[str] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _lowercase ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output
        # regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
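# Editor's note: a minimal sketch of the left-padded batched-generation pattern
# that `test_batch_generation` above exercises. The "facebook/xglm-564M"
# checkpoint is real; the prompts are illustrative, and the snippet assumes the
# module-level imports of XGLMTokenizer/TFXGLMForCausalLM earlier in this file.
if __name__ == "__main__":
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    tokenizer.padding_side = "left"  # pad on the left so generated tokens line up on the right
    model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    batch = tokenizer(["Hello, my dog is a little", "Today is"], return_tensors="tf", padding=True)
    generated = model.generate(
        input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=12
    )
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))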
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
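# Editor's note: a small sanity check of the XOR scheme `try_key` decodes. The
# three-letter key b"abc" is illustrative only, not the puzzle's actual key.
if __name__ == "__main__":
    sample_key = tuple(b"abc")
    sample_cipher = [ord(char) ^ k for char, k in zip("the secret", cycle(sample_key))]
    assert try_key(sample_cipher, sample_key) == "the secret"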
| 41 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'

CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'

CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = F"""repo_txt_data-{int(time.time() * 10E3)}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
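# Editor's note: an illustrative (hypothetical) test showing how the fixtures
# above compose. `HfApi.dataset_info` is a real method; the test itself is not
# part of the original suite, so it is left commented out (a conftest should
# not define tests).
#
# def test_private_text_repo_is_created(hf_api, hf_token, hf_private_dataset_repo_txt_data):
#     info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
#     assert info.id == hf_private_dataset_repo_txt_data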
| 17 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
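# Editor's note: usage sketch for the config and its ONNX description. Names
# mirror this module; the argument values are arbitrary illustrations.
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
    print(config.model_type, dict(onnx_config.inputs), dict(onnx_config.outputs))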
| 42 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
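# Editor's note: a quick worked check of how `_number_of_features` feeds the
# model's input width. The argument values are arbitrary illustrations.
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(
        prediction_length=24, num_time_features=2, num_static_categorical_features=1, cardinality=[5]
    )
    # feature_size = input_size * len(lags_sequence) + _number_of_features
    #              = 1 * 7 + (3 embedding dims + 2 time features + 2 scaling features) = 14
    print(config.feature_size)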
| 17 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, 'half'):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip('non-deterministic pipeline')
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy')
        init_image = init_image.resize((512, 512))

        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='fp16')

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type='np',
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5E-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy')
        init_image = init_image.resize((512, 512))

        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type='np',
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2E-2
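# Editor's note: the end-to-end CycleDiffusion call pattern the integration
# tests above exercise, condensed into a sketch. Model ids and the image URL
# mirror the fixtures; the output filename is arbitrary and a CUDA device is
# assumed.
if __name__ == "__main__":
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/cycle-diffusion/black_colored_car.png').resize((512, 512))
    scheduler = DDIMScheduler.from_pretrained('CompVis/stable-diffusion-v1-4', subfolder='scheduler')
    pipe = CycleDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', scheduler=scheduler).to('cuda')
    result = pipe(
        prompt='A blue colored car',
        source_prompt='A black colored car',
        image=init_image,
        num_inference_steps=100,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
    )
    result.images[0].save('blue_car.png')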
| 43 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
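# Editor's note: minimal document-question-answering usage, mirroring the slow
# tests above. `pipeline` and `INVOICE_URL` come from this module; the model id
# is the real "impira/layoutlm-document-qa" checkpoint used in the tests.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2))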
| 17 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_a : int = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
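# Editor's note: example invocation. The script filename and paths are
# placeholders for wherever this file and the fairseq checkpoint live.
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted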
| 44 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file of '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(F"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1E-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(F"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
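# Editor's note: example invocation; paths are placeholders. The checkpoint
# directory must contain checkpoint.pt, dict.txt and bpecodes, as checked in
# convert_biogpt_checkpoint_to_pytorch above.
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path ./biogpt-converted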
| 17 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info('''Step 1 - Create the dataset''')
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text'''])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('''Step 2 - Index the dataset''')
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('''embeddings''', custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''')
    dataset.get_index('''embeddings''').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv'),
        metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''},
    )
    question: Optional[str] = field(
        default=None,
        metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'},
    )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq',
        metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''},
    )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base',
        metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb'),
        metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'},
    )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
__UpperCAmelCase : int = field(
default=1_6 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : int = field(
default=7_6_8 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
__UpperCAmelCase : int = field(
default=1_2_8 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowercase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 45 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : int ):
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
self.m_edges.append([u_node, v_node, weight] )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase__ )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(UpperCAmelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase__ )
def _lowercase ( self : Any ):
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase ,__lowercase ,__lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
SCREAMING_SNAKE_CASE__ = False
@skip_mps
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = StableDiffusionAttendAndExcitePipeline
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def _snake_case ( cls ) -> str:
super().setUpClass()
torch.use_deterministic_algorithms(lowercase )
@classmethod
def _snake_case ( cls ) -> List[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowercase )
def _snake_case ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCAmelCase = CLIPTextModel(lowercase )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _snake_case ( self , lowercase , lowercase=0 ) -> Optional[Any]:
if str(lowercase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowercase )
else:
lowerCAmelCase = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCAmelCase = lowerCAmelCase = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = self.get_dummy_inputs(lowercase )
lowerCAmelCase = pipe(**lowercase ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCAmelCase = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase , 1e-3 )
def _snake_case ( self ) -> Union[str, Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def _snake_case ( self ) -> int:
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def _snake_case ( self ) -> Optional[int]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _snake_case ( self ) -> Optional[int]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def _snake_case ( self ) -> int:
super().test_save_load_local(expected_max_difference=5e-4 )
def _snake_case ( self ) -> int:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
@classmethod
def _snake_case ( cls ) -> Dict:
super().setUpClass()
torch.use_deterministic_algorithms(lowercase )
@classmethod
def _snake_case ( cls ) -> Tuple:
super().tearDownClass()
torch.use_deterministic_algorithms(lowercase )
def _snake_case ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = torch.manual_seed(51 )
lowerCAmelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=lowercase , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCAmelCase = """a painting of an elephant with glasses"""
lowerCAmelCase = [5, 7]
lowerCAmelCase = pipe(
prompt=lowercase , token_indices=lowercase , guidance_scale=7.5 , generator=lowercase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 46 |
"""simple docstring"""
from math import sqrt
def _A ( UpperCamelCase_ : int) -> int:
'''simple docstring'''
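    # returns the sum of the proper divisors of n (all divisors except n itself)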
__lowercase = 0
for i in range(1, int(sqrt(UpperCamelCase_) + 1)):
if n % i == 0 and i != sqrt(UpperCamelCase_):
total += i + n // i
elif i == sqrt(UpperCamelCase_):
total += i
return total - n
def _A ( UpperCamelCase_ : int = 10000) -> int:
'''simple docstring'''
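    # a number i is amicable when sum_of_divisors(sum_of_divisors(i)) == i while
    # sum_of_divisors(i) != i; this sums all such numbers below the limit
    # (cf. Project Euler problem 21)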
__lowercase = sum(
i
for i in range(1, UpperCamelCase_)
if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 17 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
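# keys are fairseq parameter-name fragments and values are their transformers counterparts;
# the "*" wildcard stands for the encoder layer index and is filled in during conversion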
lowerCamelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> Any:
"""simple docstring"""
for attribute in key.split('.' ):
_SCREAMING_SNAKE_CASE =getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
_SCREAMING_SNAKE_CASE =getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
_SCREAMING_SNAKE_CASE =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =fairseq_model.state_dict()
_SCREAMING_SNAKE_CASE =hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_SCREAMING_SNAKE_CASE =False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
_SCREAMING_SNAKE_CASE =True
else:
for key, mapped_key in MAPPING.items():
_SCREAMING_SNAKE_CASE ='unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
_SCREAMING_SNAKE_CASE =True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE =name.split(_UpperCamelCase )[0].split('.' )[-2]
_SCREAMING_SNAKE_CASE =mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
_SCREAMING_SNAKE_CASE ='weight_g'
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE ='weight_v'
elif "bias" in name:
_SCREAMING_SNAKE_CASE ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_SCREAMING_SNAKE_CASE ='weight'
else:
_SCREAMING_SNAKE_CASE =None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =full_name.split('conv_layers.' )[-1]
_SCREAMING_SNAKE_CASE =name.split('.' )
_SCREAMING_SNAKE_CASE =int(items[0] )
_SCREAMING_SNAKE_CASE =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=True ) -> str:
"""simple docstring"""
if config_path is not None:
_SCREAMING_SNAKE_CASE =UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =UniSpeechSatConfig()
_SCREAMING_SNAKE_CASE =''
if is_finetuned:
_SCREAMING_SNAKE_CASE =UniSpeechSatForCTC(_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =UniSpeechSatForPreTraining(_UpperCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
_SCREAMING_SNAKE_CASE =model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 47 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a = _symbol_database.Default()
_a = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a = None
_a = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a = 45
_a = 15_81
_a = 15_17
_a = 15_70
_a = 15_84
_a = 17_93
_a = 17_95
_a = 19_16
_a = 18_64
_a = 19_05
_a = 19_19
_a = 24_29
_a = 22_08
_a = 24_18
_a = 23_23
_a = 24_07
# @@protoc_insertion_point(module_scope)
| 17 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> List[Any]:
super().__init__(features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : str = Sql(
cache_dir=UpperCamelCase__ , features=UpperCamelCase__ , sql=UpperCamelCase__ , con=UpperCamelCase__ , **UpperCamelCase__ , )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : List[Any] = None
lowerCamelCase : List[str] = None
lowerCamelCase : int = None
lowerCamelCase : Optional[Any] = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , )
# Build dataset for splits
lowerCamelCase : Any = self.builder.as_dataset(
split="train" , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
lowerCamelCase : int = dataset
lowerCamelCase : int = name
lowerCamelCase : Optional[int] = con
lowerCamelCase : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowerCamelCase : int = num_proc
lowerCamelCase : int = to_sql_kwargs
def _lowercase ( self ) -> int:
lowerCamelCase : Optional[Any] = self.to_sql_kwargs.pop("sql" , UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = self.to_sql_kwargs.pop("con" , UpperCamelCase__ )
lowerCamelCase : str = self.to_sql_kwargs.pop("index" , UpperCamelCase__ )
lowerCamelCase : str = self._write(index=UpperCamelCase__ , **self.to_sql_kwargs )
return written
def _lowercase ( self , UpperCamelCase__ ) -> str:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = args
lowerCamelCase : Tuple = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
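        # only the first batch honors the caller's `if_exists` setting; every later batch
        # must append so earlier rows are not overwritten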
lowerCamelCase : List[str] = query_table(
table=self.dataset.data , key=slice(UpperCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCamelCase : List[Any] = batch.to_pandas()
lowerCamelCase : Any = df.to_sql(self.name , self.con , index=UpperCamelCase__ , **UpperCamelCase__ )
return num_rows or len(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> int:
lowerCamelCase : Tuple = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
lowerCamelCase , lowerCamelCase : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 48 |
"""simple docstring"""
import baseaa
def _A ( UpperCamelCase_ : str) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8"))
def _A ( UpperCamelCase_ : bytes) -> str:
'''simple docstring'''
return baseaa.baadecode(UpperCamelCase_).decode("utf-8")
if __name__ == "__main__":
_a = 'Hello World!'
_a = baseaa_encode(test)
print(encoded)
_a = baseaa_decode(encoded)
print(decoded)
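    # the round trip should print the original test string unchanged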
| 17 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _A ( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 16 , __SCREAMING_SNAKE_CASE : int = 88 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "geglu" , __SCREAMING_SNAKE_CASE : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__a = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__a = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__a = [1, 0]
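        # a dual-guided pipeline might override these at runtime, for example:
        #     model.mix_ratio = 0.4
        #     model.condition_lengths = [77, 257]  # text tokens first, then image-embedding tokens
        #     model.transformer_index_for_condition = [1, 0]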
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = hidden_states
__a = []
__a = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
__a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__a = self.transformer_index_for_condition[i]
__a = self.transformers[transformer_index](
__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
__a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__a = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE)
| 49 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Any) -> List[str]:
'''simple docstring'''
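    # repeatedly moves the current minimum into `start` and the current maximum into `end`;
    # reversing `end` and returning start + leftover + end yields the list in ascending order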
__lowercase ,__lowercase = [], []
while len(UpperCamelCase_) > 1:
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
start.append(UpperCamelCase_)
end.append(UpperCamelCase_)
collection.remove(UpperCamelCase_)
collection.remove(UpperCamelCase_)
end.reverse()
return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 17 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """pix2struct_text_model"""
UpperCAmelCase__ = ["""past_key_values"""]
UpperCAmelCase__ = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
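    # aliases the generic transformer attribute names onto this config's T5-style fields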
def __init__( self : str , UpperCAmelCase : Any=50244 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : List[Any]=64 , UpperCAmelCase : str=2048 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : List[Any]=1e-6 , UpperCAmelCase : List[str]=1.0 , UpperCAmelCase : Any="gelu_new" , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Dict=False , UpperCAmelCase : List[str]=True , **UpperCAmelCase : Optional[int] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[int] = d_kv
lowerCamelCase__ : Union[str, Any] = d_ff
lowerCamelCase__ : Dict = num_layers
lowerCamelCase__ : Tuple = num_heads
lowerCamelCase__ : str = relative_attention_num_buckets
lowerCamelCase__ : Optional[Any] = relative_attention_max_distance
lowerCamelCase__ : Tuple = dropout_rate
lowerCamelCase__ : str = layer_norm_epsilon
lowerCamelCase__ : str = initializer_factor
lowerCamelCase__ : Optional[Any] = use_cache
lowerCamelCase__ : Dict = eos_token_id
lowerCamelCase__ : Dict = decoder_start_token_id
# for backwards compatibility
lowerCamelCase__ : Tuple = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , is_decoder=UpperCAmelCase , **UpperCAmelCase , )
@classmethod
def A_ ( cls : Tuple , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
lowerCamelCase__ : Union[str, Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """pix2struct_vision_model"""
def __init__( self : Optional[int] , UpperCAmelCase : int=768 , UpperCAmelCase : Optional[Any]=768 , UpperCAmelCase : Optional[Any]=2048 , UpperCAmelCase : str=64 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Any="gelu_new" , UpperCAmelCase : List[Any]=1e-6 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Union[str, Any]=1e-10 , UpperCAmelCase : List[Any]=1.0 , UpperCAmelCase : Optional[int]=4096 , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : Union[str, Any]=128 , **UpperCAmelCase : List[str] , ) -> Tuple:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Any = patch_embed_hidden_size
lowerCamelCase__ : Any = d_ff
lowerCamelCase__ : int = dropout_rate
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Optional[int] = initializer_factor
lowerCamelCase__ : int = attention_dropout
lowerCamelCase__ : str = layer_norm_eps
lowerCamelCase__ : List[Any] = dense_act_fn
lowerCamelCase__ : int = seq_len
lowerCamelCase__ : str = relative_attention_num_buckets
lowerCamelCase__ : List[str] = relative_attention_max_distance
lowerCamelCase__ : Dict = d_kv
@classmethod
def A_ ( cls : Any , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : int ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
lowerCamelCase__ : Optional[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """pix2struct"""
UpperCAmelCase__ = True
def __init__( self : str , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str=None , UpperCAmelCase : str=1.0 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : List[Any]=True , **UpperCAmelCase : Dict , ) -> Optional[int]:
super().__init__(tie_word_embeddings=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
if text_config is None:
lowerCamelCase__ : List[Any] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
lowerCamelCase__ : Tuple = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
lowerCamelCase__ : List[Any] = PixaStructTextConfig(**UpperCAmelCase )
lowerCamelCase__ : List[Any] = PixaStructVisionConfig(**UpperCAmelCase )
lowerCamelCase__ : List[str] = self.text_config.decoder_start_token_id
lowerCamelCase__ : int = self.text_config.pad_token_id
lowerCamelCase__ : Any = self.text_config.eos_token_id
lowerCamelCase__ : Optional[int] = initializer_factor
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Union[str, Any] = self.initializer_range
lowerCamelCase__ : Optional[Any] = self.initializer_range
lowerCamelCase__ : List[str] = is_vqa
@classmethod
def A_ ( cls : Dict , UpperCAmelCase : PixaStructTextConfig , UpperCAmelCase : PixaStructVisionConfig , **UpperCAmelCase : Union[str, Any] ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase )
def A_ ( self : int ) -> Optional[int]:
lowerCamelCase__ : Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : List[Any] = self.text_config.to_dict()
lowerCamelCase__ : Any = self.vision_config.to_dict()
lowerCamelCase__ : List[Any] = self.__class__.model_type
return output
| 50 |
"""simple docstring"""
def _A ( UpperCamelCase_ : list[int]) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty")
__lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average
return sum(abs(x - average) for x in nums) / len(UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self : List[str] , _snake_case : Any , _snake_case : Tuple=13 , _snake_case : Tuple=7 , _snake_case : Optional[int]=True , _snake_case : Optional[int]=True , _snake_case : List[str]=True , _snake_case : Dict=True , _snake_case : Any=99 , _snake_case : str=32 , _snake_case : Optional[Any]=5 , _snake_case : Any=4 , _snake_case : Tuple=37 , _snake_case : Optional[int]="gelu" , _snake_case : int=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Any=128 , _snake_case : List[str]=32 , _snake_case : str=16 , _snake_case : str=2 , _snake_case : List[str]=0.0_2 , _snake_case : str=3 , _snake_case : Optional[Any]=4 , _snake_case : Optional[int]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = NezhaModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Optional[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = True
UpperCAmelCase_ = NezhaModel(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , )
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : Tuple , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = NezhaForMaskedLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : Tuple , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Dict , _snake_case : int , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = NezhaForNextSentencePrediction(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def lowerCamelCase ( self : Optional[Any] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = NezhaForPreTraining(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , next_sentence_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def lowerCamelCase ( self : Dict , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : int , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = NezhaForQuestionAnswering(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : str , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = NezhaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = NezhaForTokenClassification(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = NezhaForMultipleChoice(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
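        # each tensor is expanded to (batch_size, num_choices, seq_len) so every answer
        # choice is scored against the same context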
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : str = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = True
def lowerCamelCase ( self : Optional[Any] , _snake_case : Any , _snake_case : int , _snake_case : List[str]=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if return_labels:
if model_class in get_values(_snake_case):
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case)
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case)
return inputs_dict
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = NezhaModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_ = None
self.model_tester.create_and_check_model_as_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , )
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case)
@slow
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = NezhaModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
@slow
@require_torch_gpu
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(config=_snake_case)
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = torch.jit.trace(
_snake_case , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_snake_case , os.path.join(_snake_case , '''bert.pt'''))
UpperCAmelCase_ = torch.jit.load(os.path.join(_snake_case , '''bert.pt''') , map_location=_snake_case)
loaded(inputs_dict['''input_ids'''].to(_snake_case) , inputs_dict['''attention_mask'''].to(_snake_case))
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
UpperCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)[0]
UpperCAmelCase_ = torch.Size((1, 6, 768))
self.assertEqual(output.shape , _snake_case)
UpperCAmelCase_ = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1e-4))
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
UpperCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)[0]
UpperCAmelCase_ = torch.Size((1, 6, 21128))
self.assertEqual(output.shape , _snake_case)
UpperCAmelCase_ = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1e-4))
| 51 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def _A ( ) -> str:
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
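# Hedged end-to-end sketch mirroring the integration tests above; the image path
# reuses the repository fixture, and class index 281 is ImageNet's "tabby cat":
#
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#   model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="np")
#   predicted_class = model(**inputs).logits.argmax(-1).item()   # expected: 281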
| 17 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
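#
# A hedged, equivalent fix done from inside Python rather than via the
# PYTHONPATH environment variable (the clone location below is an assumption
# mirroring the example above):
#
#   import sys
#   sys.path.insert(0, "/tmp/Megatron-LM")
#   from megatron.model import enums  # should now resolve
#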
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ":" , val.size() )
    else:
        print(msg , ":" , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
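# Hedged usage sketch for fix_query_key_value_ordering: the sizes are illustrative
# assumptions (4 heads, head size 8, num_splits=3 for a fused QKV weight), and the
# assert only demonstrates that the permutation is shape-preserving.
#
#   qkv_weight = torch.randn(3 * 4 * 8, 32)
#   reordered = fix_query_key_value_ordering(qkv_weight, 2.0, 3, 4, 8)
#   assert reordered.shape == qkv_weight.shape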
def convert_megatron_checkpoint( args , input_state_dict , config ):
# The converted output model.
UpperCamelCase : List[str] = {}
# old versions did not store training args
UpperCamelCase : Optional[int] = input_state_dict.get("args" , _lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase : Optional[int] = ds_args.padded_vocab_size
UpperCamelCase : int = ds_args.max_position_embeddings
UpperCamelCase : Any = ds_args.hidden_size
UpperCamelCase : Dict = ds_args.num_layers
UpperCamelCase : str = ds_args.num_attention_heads
UpperCamelCase : Dict = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase : Optional[Any] = config.n_head
# The hidden_size per head.
UpperCamelCase : List[str] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase : Optional[int] = input_state_dict["checkpoint_version"]
else:
UpperCamelCase : Tuple = 0.0
# The model.
UpperCamelCase : Dict = input_state_dict["model"]
# The language model.
UpperCamelCase : Any = model["language_model"]
# The embeddings.
UpperCamelCase : Tuple = lm["embedding"]
# The word embeddings.
UpperCamelCase : List[str] = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
UpperCamelCase : Dict = word_embeddings[: config.vocab_size, :]
UpperCamelCase : Optional[int] = word_embeddings
# The position embeddings.
UpperCamelCase : List[str] = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase : Union[str, Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
UpperCamelCase : List[Any] = pos_embeddings
# The transformer.
UpperCamelCase : int = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
UpperCamelCase : List[Any] = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
UpperCamelCase : int = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase : List[str] = layer_re.match(_lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase : List[str] = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase : List[Any] = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase : str = m.group(3 )
# The name of the layer.
UpperCamelCase : List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
UpperCamelCase : Optional[int] = "ln_1" if op_name.startswith("input" ) else "ln_2"
UpperCamelCase : str = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
UpperCamelCase : str = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Optional[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
UpperCamelCase : Dict = torch.tensor(-1e4 , dtype=torch.floataa )
UpperCamelCase : Dict = masked_bias
UpperCamelCase : List[Any] = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase : Dict = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Store. No change of shape.
UpperCamelCase : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase : Tuple = megatron_to_transformers[op_name]
UpperCamelCase : Tuple = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase : int = megatron_to_transformers[op_name]
UpperCamelCase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase : Optional[int] = transformer["final_layernorm.weight"]
UpperCamelCase : Optional[Any] = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
UpperCamelCase : List[Any] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=_lowerCAmelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_lowerCAmelCase , help="An optional config json file describing the pre-trained model." , )
UpperCamelCase : List[str] = parser.parse_args()
# Extract the basename.
UpperCamelCase : List[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
UpperCamelCase : int = torch.load(_lowerCAmelCase , map_location="cpu" )
else:
UpperCamelCase : List[Any] = torch.load(args.path_to_checkpoint , map_location="cpu" )
UpperCamelCase : Optional[Any] = input_state_dict.get("args" , _lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase : Dict = "gelu_fast"
elif ds_args.openai_gelu:
UpperCamelCase : str = "gelu_new"
else:
UpperCamelCase : Tuple = "gelu"
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase : Tuple = "gelu_new"
# Spell out all parameters in case the defaults change.
UpperCamelCase : List[Any] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_lowerCAmelCase , summary_activation=_lowerCAmelCase , summary_proj_to_labels=_lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCAmelCase , use_cache=_lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
UpperCamelCase : int = GPTaConfig.from_json_file(args.config_file )
UpperCamelCase : str = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
UpperCamelCase : Dict = convert_megatron_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCAmelCase , _lowerCAmelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
UpperCamelCase : Union[str, Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase : Optional[int] = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
UpperCamelCase : Optional[int] = "gpt2"
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
UpperCamelCase : Tuple = type(_lowerCAmelCase ).__name__
UpperCamelCase : Optional[int] = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(_lowerCAmelCase )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_lowerCAmelCase )
# Store the state_dict to file.
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , "pytorch_model.bin" )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
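#
# A hedged invocation sketch, mirroring the note at the top of this file (the
# checkpoint path is an assumption):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/megatron_gpt2_checkpoint.zip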
| 52 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase ,lowercase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase = load_tool("text-classification" )
self.tool.setup()
__lowercase = load_tool("text-classification", remote=UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = self.tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : str ):
__lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : List[str] ):
__lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : Tuple ):
__lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
| 17 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a__ : Any =get_tests_dir('''fixtures''')
a__ : int =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
a__ : int =get_tests_dir('''fixtures/dummy-config.json''')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Union[str, Any] ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A ).to_dict()
config_dict.pop('feature_extractor_type' )
__UpperCamelCase = WavaVecaFeatureExtractor(**__A )
# save in new folder
model_config.save_pretrained(__A )
config.save_pretrained(__A )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A )
# make sure private variable is not incorrectly saved
__UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def _lowerCamelCase ( self : Dict ):
with self.assertRaisesRegex(
__A , 'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('bert-base' )
def _lowerCamelCase ( self : List[Any] ):
with self.assertRaisesRegex(
__A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A , revision='aaaaaa' )
def _lowerCamelCase ( self : List[str] ):
with self.assertRaisesRegex(
__A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _lowerCamelCase ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__A )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _lowerCamelCase ( self : List[str] ):
try:
AutoConfig.register('custom' , __A )
AutoFeatureExtractor.register(__A , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoFeatureExtractor.register(__A , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Any ):
        class NewFeatureExtractor ( CustomFeatureExtractor ):
            """simple docstring"""
            is_local = True
try:
AutoConfig.register('custom' , __A )
AutoFeatureExtractor.register(__A , __A )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(__A , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
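# A condensed, hedged sketch of the registration flow the tests above exercise
# (CustomConfig / CustomFeatureExtractor come from the fixture modules imported
# at the top of this file; the save path and bare construction are assumptions):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   fe = CustomFeatureExtractor()   # illustrative construction
#   fe.save_pretrained("/tmp/custom-fe")
#   reloaded = AutoFeatureExtractor.from_pretrained("/tmp/custom-fe")
#   assert isinstance(reloaded, CustomFeatureExtractor)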
| 53 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ):
        super().__init__()
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def _lowercase ( self : List[str] ):
return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
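# Hedged usage sketch for the comparison pipeline above. "StableDiffusionComparisonPipeline"
# is a hypothetical name standing in for the obfuscated class, and reusing the v1-4
# components is an assumption (all four SD v1.x checkpoints expose the same layout):
#
#   donor = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   pipe = StableDiffusionComparisonPipeline(
#       vae=donor.vae, text_encoder=donor.text_encoder, tokenizer=donor.tokenizer,
#       unet=donor.unet, scheduler=donor.scheduler,
#       safety_checker=donor.safety_checker, feature_extractor=donor.feature_extractor)
#   pipe.enable_attention_slicing("auto")  # halve attention heads per slice to cut peak memory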
| 17 | 0 |
"""simple docstring"""
def odd_even_transposition(arr ):
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr
if __name__ == "__main__":
    arr = list(range(1_0, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 54 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx"
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ):
__lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) )
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowercase ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : int ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : str ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _lowercase ( self : Dict ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self : str ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
__lowercase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" )
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
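# Hedged sketch of the scheduler-swap pattern the tests above repeat: a compatible
# scheduler can be rebuilt from the pipeline's own config and reassigned (the
# prompt and the 128x128 low_res_image input are illustrative assumptions):
#
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
#   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
#   upscaled = pipe(prompt="a photo", image=low_res_image, num_inference_steps=10).images[0]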
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=0 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = projection_dim
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFDPRContextEncoder(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFDPRQuestionEncoder(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFDPRReader(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_lowerCamelCase = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFDPRModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRReader.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowerCamelCase_ = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase_ = model(UpperCamelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
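# Hedged sketch of how the DPR encoders above pair up for retrieval scoring
# (tokenization elided; the context-encoder checkpoint id is the standard
# companion of the question encoder used in the test):
#
#   q_model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   ctx_model = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#   q_emb = q_model(q_input_ids)[0]          # (1, 768) pooled question embedding
#   p_emb = ctx_model(p_input_ids)[0]        # (1, 768) pooled passage embedding
#   score = tf.reduce_sum(q_emb * p_emb)     # dot-product relevance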
| 55 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : str = ","
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer"
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None
__UpperCAmelCase : Optional[Union[List[int], List[str]]] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[Union[int, List[int]]] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Union[str, List[str]]] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = "."
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = '"'
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : int = 1_0_0_0_0
__UpperCAmelCase : Optional[datasets.Features] = None
__UpperCAmelCase : Optional[str] = "strict"
__UpperCAmelCase : Literal["error", "warn", "skip"] = "error"
__UpperCAmelCase : Optional[str] = None
def _lowercase ( self : Tuple ):
if self.delimiter is not None:
__lowercase = self.delimiter
if self.column_names is not None:
__lowercase = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
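

# Minimal usage sketch (illustrative, not part of the original module): this builder is
# what `datasets.load_dataset("csv", ...)` dispatches to, so the easiest way to exercise
# it end to end is through that entry point with a throwaway CSV file.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
        f.write("id,label\n1,pos\n2,neg\n")
        tmp_path = f.name

    dset = datasets.load_dataset("csv", data_files=tmp_path, split="train")
    print(dset.column_names)  # ['id', 'label']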
| 17 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 56 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n               Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n               Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n               Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n               Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n               Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n               Kern, Robert and Larson, Eric and Carey, C J and\n               Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n               {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n               Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n               Harris, Charles R. and Archibald, Anne M. and\n               Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n               {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n               Computing in Python}},\n    journal = {Nature Methods},\n    year    = {2020},\n    volume  = {17},\n    pages   = {261--272},\n    adsurl  = {https://rdcu.be/b08Wh},\n    doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
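

# Illustrative example of the renaming above (not part of the original script),
# traced through the substitutions in order:
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"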
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original BLIP weights into the transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 57 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients in increasing order of degree,
    at the point x by summing each term directly."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, which needs only one
    multiplication and one addition per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficient of x^0, x^1, x^2, x^3, x^4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
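    # Quick equivalence check (illustrative): both evaluators compute
    # 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9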
| 17 | 0 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace equation: the speed of sound in a fluid is sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
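    # Example (illustrative): water at roughly 998 kg/m^3 with a bulk modulus of
    # about 2.15e9 Pa gives a speed of sound near 1468 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))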
| 58 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
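
# Example invocation (illustrative; the script file name and all paths are placeholders):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./converted_model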
| 17 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
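

# Minimal usage sketch (illustrative; downloading the checkpoint requires network access):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("Hello, how are you?", return_tensors="pt")
#
# Because of `set_src_lang_special_tokens`, the encoded `input_ids` start with the
# en_XX language-code id and end with the </s> id.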
| 59 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 17 | 0 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
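    # Example (illustrative): 6 is a perfect number, so the sum of its proper
    # divisors (1 + 2 + 3) equals the number itself.
    assert sum_of_divisors(6) == 6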
| 60 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
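

# Minimal usage sketch (illustrative; instantiating the tool downloads
# openai/whisper-base on first use):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # `audio` is a raw waveform accepted by WhisperProcessor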
| 17 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
    specify them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
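
# Example invocation (illustrative; the script file name and hyperparameters are placeholders):
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss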
| 61 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # initialize so purely-numpy inputs don't hit an undefined name below
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
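

# Illustrative example (not part of the original pipeline): interpolating halfway
# between two orthogonal unit vectors follows the great circle, so
#   slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# returns roughly [0.7071, 0.7071] rather than the linear midpoint [0.5, 0.5].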
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing ( self : Tuple, slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing ( self : int ):
        self.enable_attention_slicing(None )
    def freeze_vae ( self : str ):
        set_requires_grad(self.vae, False )
    def unfreeze_vae ( self : Any ):
        set_requires_grad(self.vae, True )
    def freeze_unet ( self : Union[str, Any] ):
        set_requires_grad(self.unet, False )
    def unfreeze_unet ( self : Any ):
        set_requires_grad(self.unet, True )
    def get_timesteps ( self : List[str], num_inference_steps : Dict, strength : Any, device : Optional[Any] ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ), num_inference_steps )
        t_start = max(num_inference_steps - init_timestep, 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
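    # e.g. with num_inference_steps=50 and strength=0.6: init_timestep = 30 and
    # t_start = 20, so denoising covers only the last 30 of the 50 scheduler steps.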
    def prepare_latents ( self : List[str], image : Union[str, Any], timestep : Union[str, Any], batch_size : Optional[Any], dtype : Dict, device : Any, generator : int=None ):
        if not isinstance(image, torch.Tensor ):
            raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(image )}""" )
        image = image.to(device=device, dtype=dtype )
        if isinstance(generator, list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents, dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18_215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0 )
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep )
        latents = init_latents
        return latents
    def get_image_description ( self : Optional[int], image : Dict ):
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
    def get_clip_image_embeddings ( self : Tuple, image : Union[str, Any], batch_size : Tuple ):
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0 )
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn ( self : str, latents : List[Any], timestep : Dict, index : List[str], text_embeddings : Dict, noise_pred_original : List[str], text_embeddings_clip : Union[str, Any], clip_guidance_scale : Optional[int], ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18_215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip ).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents )[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
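    # The update above is CLIP-based classifier guidance in the sense of Dhariwal &
    # Nichol (2021): with grads = -d(loss)/d(latents), DDIM-style schedulers use
    # eps' = eps - sqrt(1 - alpha_prod_t) * grads, while LMS-style schedulers shift
    # the latents directly by sigma**2 * grads instead of editing the noise estimate.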
@torch.no_grad()
    def __call__( self : str, style_image : Union[torch.FloatTensor, PIL.Image.Image], content_image : Union[torch.FloatTensor, PIL.Image.Image], style_prompt : Optional[str] = None, content_prompt : Optional[str] = None, height : Optional[int] = 5_1_2, width : Optional[int] = 5_1_2, noise_strength : float = 0.6, num_inference_steps : Optional[int] = 5_0, guidance_scale : Optional[float] = 7.5, batch_size : Optional[int] = 1, eta : float = 0.0, clip_guidance_scale : Optional[float] = 1_0_0, generator : Optional[torch.Generator] = None, output_type : Optional[str] = "pil", return_dict : bool = True, slerp_latent_style_strength : float = 0.8, slerp_prompt_style_strength : float = 0.1, slerp_clip_image_style_strength : float = 0.1, ):
        if isinstance(generator, list ) and len(generator ) != batch_size:
            raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(generator )} generators.""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if isinstance(generator, torch.Generator ) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            content_prompt = self.get_image_description(content_image )
        if style_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            style_prompt = self.get_image_description(style_image )
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings )
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0 )
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # Preprocess image
        content_image = preprocess(content_image, width, height )
        content_latents = self.prepare_latents(
            content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator )
        style_image = preprocess(style_image, width, height )
        style_latents = self.prepare_latents(
            style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator )
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents )
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size )
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size )
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt" )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps ):
            for i, t in enumerate(timesteps ):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred , latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None )
| 17 | 0 |
from string import ascii_lowercase, ascii_uppercase
def _UpperCAmelCase ( sentence : str ):
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
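# Illustrative examples (comments only): the helper capitalises just the first
# character, e.g. _UpperCAmelCase("hello world") -> "Hello world", and leaves
# non-letters untouched: _UpperCAmelCase("123 abc") -> "123 abc".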
if __name__ == "__main__":
from doctest import testmod
testmod()
| 62 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
"""simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self : Optional[int], parent : List[str], batch_size : Optional[int]=1_4, seq_length : str=7, is_training : Optional[Any]=True, use_input_mask : List[Any]=True, use_labels : int=True, vocab_size : List[str]=9_9, d_model : Union[str, Any]=3_2, num_hidden_layers : Union[str, Any]=2, num_attention_heads : Union[str, Any]=4, ffn_dim : Tuple=3_7, activation_function : List[Any]="gelu", activation_dropout : List[str]=0.1, attention_dropout : Optional[int]=0.1, max_position_embeddings : Tuple=5_1_2, initializer_range : Optional[Any]=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config ( self : Union[str, Any] ):
        return XGLMConfig.from_pretrained("facebook/xglm-564M" )
    def prepare_config_and_inputs ( self : Tuple ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config ( self : List[Any] ):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp ( self : Optional[Any] ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=3_7 )
    def test_config ( self : Any ):
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained ( self : List[str] ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
    def test_resize_token_embeddings ( self : int ):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_xglm ( self : Dict, verify_outputs : Optional[int]=True ):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        input_ids = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids )
@slow
    def test_xglm_sample ( self : List[Any] ):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tf.random.set_seed(0 )
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf" )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0" ):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation ( self : Dict ):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ )
__lowercase = inputs["input_ids"]
__lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ )
        expected_output_sentence = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence )
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence] )
| 17 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap :
"""simple docstring"""
    def get_winner ( self : List[str] , weights : list[list[float]] , sample : list[int] ):
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if da > db else 1
        return 0
    def update ( self : List[Any] , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ):
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
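    # Worked example of the update rule w <- w + alpha * (x - w) (comment only):
    # with alpha=0.5, weight w=0.2 and input component x=1, the weight moves halfway
    # toward the input: 0.2 + 0.5 * (1 - 0.2) = 0.6.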
def main ( ) -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
main()
| 63 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url ( monkeypatch : List[Any]) -> Tuple:
    '''simple docstring'''
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)
@pytest.fixture
def ci_hub_config ( monkeypatch : int) -> List[Any]:
    '''simple docstring'''
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path ( monkeypatch : str) -> Dict:
    '''simple docstring'''
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token ( ci_hub_config : Optional[Any], ci_hub_token_path : List[Any]) -> List[str]:
    '''simple docstring'''
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def _A ( ) -> List[Any]:
'''simple docstring'''
return HfApi(endpoint=UpperCamelCase_)
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi) -> List[Any]:
'''simple docstring'''
__lowercase = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase_)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase_)
@pytest.fixture
def cleanup_repo ( hf_api : Dict) -> int:
    '''simple docstring'''
    def _cleanup_repo(repo_id : Optional[int]):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
    return _cleanup_repo
@pytest.fixture
def temporary_repo ( cleanup_repo : str) -> Any:
    '''simple docstring'''
    @contextmanager
    def _temporary_repo(repo_id : Any):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)
    return _temporary_repo
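# Typical use in a test (illustrative sketch only; the repo name below is made up):
#     def test_something(temporary_repo):
#         with temporary_repo(f"{CI_HUB_USER}/tmp-test-repo") as repo_id:
#             ...  # create and exercise the repo; cleanup runs even if the body raises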
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data ( hf_private_dataset_repo_txt_data_ : Tuple, ci_hub_config : Any, ci_hfh_hf_hub_url : Dict) -> Optional[int]:
    '''simple docstring'''
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17 | 0 |
"""simple docstring"""
def euclidean_gcd (a : int , b : int ):
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a
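# Worked trace (comment only): euclidean_gcd(252, 105) steps through
# (252, 105) -> (105, 42) -> (42, 21) -> (21, 0) and returns 21.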
def euclidean_gcd_recursive (a : int , b : int ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ():
"""simple docstring"""
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 64 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig ( PretrainedConfig ):
"""simple docstring"""
__UpperCAmelCase : int = "time_series_transformer"
__UpperCAmelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self : int, prediction_length : Optional[int] = None, context_length : Optional[int] = None, distribution_output : str = "student_t", loss : str = "nll", input_size : int = 1, lags_sequence : List[int] = [1, 2, 3, 4, 5, 6, 7], scaling : Optional[Union[str, bool]] = "mean", num_dynamic_real_features : int = 0, num_static_categorical_features : int = 0, num_static_real_features : int = 0, num_time_features : int = 0, cardinality : Optional[List[int]] = None, embedding_dimension : Optional[List[int]] = None, encoder_ffn_dim : int = 3_2, decoder_ffn_dim : int = 3_2, encoder_attention_heads : int = 2, decoder_attention_heads : int = 2, encoder_layers : int = 2, decoder_layers : int = 2, is_encoder_decoder : bool = True, activation_function : str = "gelu", d_model : int = 6_4, dropout : float = 0.1, encoder_layerdrop : float = 0.1, decoder_layerdrop : float = 0.1, attention_dropout : float = 0.1, activation_dropout : float = 0.1, num_parallel_samples : int = 1_0_0, init_std : float = 0.02, use_cache : Any=True, **kwargs : List[str], ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
@property
    def _number_of_features ( self : Optional[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
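    # e.g. with the defaults (input_size=1, lags_sequence=[1..7], cardinality unset
    # and no extra features) the property evaluates to 2, just the log1p(abs(loc))
    # and log(scale) features, so feature_size becomes 1 * 7 + 2 = 9.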
| 17 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> None:
    '''simple docstring'''
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 65 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ):
pass
def _A ( UpperCamelCase_ : Union[str, Any]) -> Any:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_a = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline ( self : Optional[Any], model : Optional[Any], tokenizer : Dict, processor : Optional[Any] ):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ), None, "" ) ) )
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test ( self : int, dqa_pipeline : Tuple, examples : Any ):
        outputs = dqa_pipeline(examples, top_k=2 )
        self.assertEqual(
            outputs, [
[
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
]
]
* 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt ( self : Dict ):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" )
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9},
            {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2 )
        self.assertEqual(nested_simplify(outputs, decimals=4 ), expected_output )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2 )
        self.assertEqual(nested_simplify(outputs, decimals=4 ), expected_output )
        # No text can be detected in this image: LayoutLMv2 finds no words, so the
        # pipeline should come back with an empty answer list.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2 )
        self.assertEqual(outputs, [] )
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2 )
        self.assertEqual(outputs, [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _lowercase ( self : List[str] ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
| 17 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self: List[Any] , parent: List[str] , batch_size: Optional[Any]=13 , seq_length: List[str]=7 , is_training: Dict=True , use_attention_mask: List[str]=True , use_token_type_ids: Optional[int]=True , use_labels: Any=True , vocab_size: Optional[Any]=99 , hidden_size: Tuple=32 , num_hidden_layers: Tuple=5 , num_attention_heads: Dict=4 , intermediate_size: Optional[Any]=37 , hidden_act: Union[str, Any]="gelu" , hidden_dropout_prob: Tuple=0.1 , attention_probs_dropout_prob: List[Any]=0.1 , max_position_embeddings: List[str]=512 , type_vocab_size: Optional[int]=16 , type_sequence_label_size: int=2 , initializer_range: List[Any]=0.0_2 , num_choices: Union[str, Any]=4 , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self: Tuple ) -> str:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self: Optional[int] ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self: Optional[Any] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self: int ) -> List[str]:
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained ( self: List[str] ) -> Dict:
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 66 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
"""simple docstring"""
    def __init__( self : Dict, *, # begin keyword-only arguments
    bos : str="<s>", pad : Tuple="<pad>", eos : str="</s>", unk : Optional[Any]="<unk>", extra_special_symbols : List[Any]=None, ):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self : List[str], other : Dict ):
        return self.indices == other.indices
    def __getitem__( self : Optional[int], idx : List[str] ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self : str ):
        return len(self.symbols )
    def __contains__( self : Any, sym : Optional[Any] ):
        return sym in self.indices
    @classmethod
    def load ( cls : List[Any], f : Optional[Any] ):
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol ( self : Dict, word : Optional[Any], n : List[Any]=1, overwrite : str=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta ( self : Any, lines : str ):
        return 0
    def add_from_file ( self : Tuple, f : List[Any] ):
        if isinstance(f, str ):
            try:
                with open(f, "r", encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line ,field = line.rstrip().rsplit(" ", 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line ,field = line.rsplit(" ", 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word ) )
                self.add_symbol(word, n=count, overwrite=overwrite )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def rewrite_dict_keys ( d : int) -> str:
    '''simple docstring'''
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
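# Worked example (comment only): {"<s>": 0, "hel@@": 7, "hello": 4} becomes
# {"hel": 7, "hello</w>": 4, "<s>": 0}: BPE continuation markers "@@" are dropped,
# word-final tokens gain "</w>", and the four special tokens are restored as-is.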
def convert_biogpt_checkpoint_to_pytorch ( biogpt_checkpoint_path : str, pytorch_dump_folder_path : str) -> List[Any]:
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(F"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# tokenizer config
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
__lowercase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"""Generating {biogpt_tokenizer_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# model
__lowercase = chkpt["model"]
# remove unneeded keys
__lowercase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase_, UpperCamelCase_)
__lowercase = list(model_state_dict.keys())
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight"):
__lowercase = model_state_dict.pop(UpperCamelCase_)
else:
__lowercase = model_state_dict.pop(UpperCamelCase_)
__lowercase = BioGptConfig.from_pretrained(UpperCamelCase_)
__lowercase = BioGptForCausalLM(UpperCamelCase_)
# check that it loads ok
model_new.load_state_dict(UpperCamelCase_)
# save
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
print(F"""Generating {pytorch_weights_dump_path}""")
torch.save(UpperCamelCase_, UpperCamelCase_)
print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
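    # Example invocation (illustrative only; the script file name and paths below
    # are assumptions, not part of the original file):
    #
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
    #       --pytorch_dump_folder_path /path/to/output_dir
    #
    # The checkpoint dir is expected to contain checkpoint.pt, dict.txt and bpecodes.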
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
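# Illustrative usage (a sketch, not part of the original module):
#
#   shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
#
# Shifting "crybd cdbsxq" back by 10 yields "short string", which the chi-squared
# test should rank as the most English-like candidate; the exact statistic value
# depends on the frequency table in use.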
| 67 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """Graph stored as an edge list; computes an MST with Borůvka's algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
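# Illustrative usage (a sketch, not part of the original file):
#
#   g = Graph(3)
#   g.add_edge(0, 1, 1)
#   g.add_edge(1, 2, 2)
#   g.add_edge(0, 2, 3)
#   g.boruvka()
#
# Borůvka's algorithm keeps the two lightest edges (0-1 and 1-2), so the final
# line printed is: "The total weight of the minimal spanning tree is: 3"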
| 17 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    # NOTE: the boolean flags (training/inference/fp16/...) were garbled in the
    # source; they are restored here to the values these benchmark tests typically use.
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 68 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below limit (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
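# Worked example (not part of the original file): sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220, so 220 and 284 form an amicable pair and both are
# counted; solution(10000) returns 31626, the published Project Euler 21 answer.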
| 17 | 0 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits (Project Euler 8)."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 69 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 17 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # default configuration already matches the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 70 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
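# Running this module round-trips the sample string: it first prints the Base85
# encoding of "Hello World!" as a bytes literal, then prints "Hello World!" again
# after decoding.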
| 17 | 0 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
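# Example (not part of the original file): 145 is a Krishnamurthy (strong) number
# because 1! + 4! + 5! = 1 + 24 + 120 = 145; the only others in base 10 are
# 1, 2 and 40585.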
| 71 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the minimum to the front and the maximum to the
    back (despite the name, this is not a classic merge sort)."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
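# Worked example (not part of the original file): for [3, 1, 4, 2] the first pass
# pulls (min=1, max=4) into start/end, leaving [3, 2]; the second pulls (2, 3).
# start == [1, 2], end reversed == [3, 4], so the result is [1, 2, 3, 4].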
| 17 | 0 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording nodes whose
    subtree has an even number of vertices."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """
    2 1
    3 1
    4 3
    5 2
    6 1
    7 2
    8 6
    9 8
    10 8
    On removing edges (1,3) and (1,6), we can get the desired result 2.
    """
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
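# For the sample tree above, the even-sized subtrees are rooted at 3, 6 and the
# root 1 itself, so cuts == [3, 6, 1] and the script prints 2 — the number of
# edges ((1,3) and (1,6)) that can be removed to leave only even components.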
| 72 |
"""simple docstring"""
def mean_absolute_deviation(nums: list[int]) -> float:
    """
    Calculate the mean absolute deviation of a list of numbers.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 17 | 0 |
import argparse
import os
import re
a ="""src/transformers"""
# Pattern that looks at the indentation in a line.
a =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
a =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
a =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a =re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
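    # Typical invocation (illustrative; the script's path within the repo is an
    # assumption, not stated in this file):
    #
    #   python utils/custom_init_isort.py --check_only   # report files that would change
    #   python utils/custom_init_isort.py                # rewrite the __init__.py files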
| 73 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_labels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # BeiT takes pixel_values instead of input_ids, so the common signature test is overridden
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 17 | 0 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # a new tokenizer can't be trained via the Tokenizers library, so these common tests are skipped
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
pass | 74 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 17 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a_ : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio clip against a set
    of free-form candidate labels.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
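# --- Usage sketch (added for illustration; not part of the original module) ---
# Exercising the pipeline above through the high-level `pipeline` factory. The
# CLAP checkpoint name is an assumption; any zero-shot audio model with a paired
# feature extractor and tokenizer should behave the same way.
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> [{"score": ..., "label": "dog barking"}, {"score": ..., "label": "vacuum cleaner"}]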
| 75 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Runs the same prompt through the Stable Diffusion v1.1, v1.2, v1.3 and v1.4
    checkpoints so that their outputs can be compared side by side.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
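# --- Usage sketch (added for illustration; not part of the original pipeline file) ---
# Loading this file as a diffusers community pipeline; the `custom_pipeline`
# identifier below is an assumption about how the script would be published.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   # output.images holds one image per v1.x checkpoint, in order v1.1 ... v1.4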
| 17 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 76 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 17 | 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
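# --- Usage sketch (added for illustration; not part of the original module) ---
# The checkpoint name below is the public OWL-ViT base model; treat it as an
# assumption if your local setup differs.
#
#   from transformers import OwlViTProcessor
#   from PIL import Image
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cats.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # inputs now holds input_ids, attention_mask and pixel_values for OwlViTForObjectDetection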
| 77 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
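# --- Usage sketch (added for illustration; not part of the original module) ---
# End users normally reach this builder through `load_dataset`; the file name
# and separator below are placeholders.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("csv", data_files="my_table.csv", sep=";")
#   print(ds["train"].features)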
| 17 | 0 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
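# Cross-check (added for illustration): the standard library enumerates the same
# r-sized subsets, so it can be used to sanity-check the recursion above.
#
#   from itertools import combinations
#   assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10  # C(5, 3) == 10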
| 78 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n    >>> spearmanr_metric = datasets.load_metric("spearmanr")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n    >>> print(results)\n    {\'spearmanr\': -0.7}\n\n    Example 2:\n    >>> spearmanr_metric = datasets.load_metric("spearmanr")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n    ...                                    predictions=[10, 9, 2.5, 6, 4],\n    ...                                    return_pvalue=True)\n    >>> print(results[\'spearmanr\'])\n    -0.7\n    >>> print(round(results[\'spearmanr_pvalue\'], 2))\n    0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
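# Quick sanity check (added for illustration): a perfectly monotonic pair of
# sequences has a Spearman coefficient of exactly 1.0.
#
#   assert spearmanr([1, 2, 3], [10, 20, 30]).correlation == 1.0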
| 17 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on a sample COCO image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
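# --- Usage sketch (added for illustration; not part of the original test file) ---
# Minimal end-to-end inference with the checkpoint exercised above:
#
#   from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor
#   from PIL import Image
#
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(Image.open("cats.png"), return_tensors="pt")
#   outputs = model(**inputs)
#   segmentation = processor.post_process_instance_segmentation(outputs)[0]["segmentation"]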
| 79 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, which needs one
    multiplication per coefficient instead of computing each power of x."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
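# Quick cross-check (added for illustration): both strategies compute
# 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800, modulo
# floating-point rounding, so the two printed values agree.
#
#   import math
#   assert math.isclose(evaluate_poly(poly, x), horner(poly, x))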
| 17 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
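# --- Illustrative subclass (added; not part of the original module) ---
# A minimal sketch of how a concrete command could plug into this interface.
# The command name and wiring are assumptions for demonstration only; real
# commands receive the sub-parsers action created by the CLI entry point.
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")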
| 80 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
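# Example invocation (script name and paths below are placeholders):
#   python convert_longformer_qa_checkpoint.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./lightning.ckpt \
#     --pytorch_dump_folder_path ./longformer-qa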
| 17 | 0 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = DistilBertTokenizer
__lowerCAmelCase = DistilBertTokenizerFast
__lowerCAmelCase = True
@slow
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
a =tokenizer.build_inputs_with_special_tokens(__A )
a =tokenizer.build_inputs_with_special_tokens(__A , __A )
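        # expected layouts: single sequence -> [CLS] tokens [SEP];
        # pair -> [CLS] tokens_a [SEP] tokens_b [SEP]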
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
        ]
| 81 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because it should only be run when releasing a minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, )
assert hasattr(self, "env" )
def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ):
# configuration for running training on smdistributed Model Parallel
__lowercase = {
"enabled": True,
"processes_per_host": 8,
}
__lowercase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""",
            instance_count=UpperCAmelCase__,
            instance_type=self.instance_type,
            debugger_hook_config=UpperCAmelCase__,
            hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_0_0,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=UpperCAmelCase__,
            py_version="py36",
        )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ):
# create estimator
__lowercase = self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump test results into a JSON file to share in the PR
with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
| 17 | 0 |
from __future__ import annotations
import time
A__ = list[tuple[int, int]]
A__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = pos_x
_lowerCAmelCase = pos_y
_lowerCAmelCase = (pos_y, pos_x)
_lowerCAmelCase = goal_x
_lowerCAmelCase = goal_y
_lowerCAmelCase = parent
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , _snake_case )
_lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , _snake_case )
_lowerCAmelCase = [self.start]
_lowerCAmelCase = False
def snake_case ( self ):
"""simple docstring"""
while self.node_queue:
_lowerCAmelCase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase = True
return self.retrace_path(_snake_case )
_lowerCAmelCase = self.get_successors(_snake_case )
for node in successors:
self.node_queue.append(_snake_case )
if not self.reached:
return [self.start.pos]
return None
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
for action in delta:
_lowerCAmelCase = parent.pos_x + action[1]
_lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , _snake_case ) )
return successors
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = node
_lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase = current_node.parent
path.reverse()
return path
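# Bidirectional BFS expands one frontier from the start and one from the goal in
# lockstep and stops when they meet; with branching factor b and path depth d it
# explores on the order of b**(d/2) nodes instead of b**d for plain BFS.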
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = BreadthFirstSearch(_snake_case , _snake_case )
_lowerCAmelCase = BreadthFirstSearch(_snake_case , _snake_case )
_lowerCAmelCase = False
def snake_case ( self ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCAmelCase = self.fwd_bfs.node_queue.pop(0 )
_lowerCAmelCase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCAmelCase = True
return self.retrace_bidirectional_path(
_snake_case , _snake_case )
_lowerCAmelCase = current_bwd_node
_lowerCAmelCase = current_fwd_node
_lowerCAmelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(_snake_case ),
self.bwd_bfs: self.bwd_bfs.get_successors(_snake_case ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_snake_case )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.fwd_bfs.retrace_path(_snake_case )
_lowerCAmelCase = self.bwd_bfs.retrace_path(_snake_case )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A__ = (0, 0)
A__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A__ = time.time()
A__ = BreadthFirstSearch(init, goal)
A__ = bfs.search()
A__ = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
A__ = time.time()
A__ = BidirectionalBreadthFirstSearch(init, goal)
A__ = bd_bfs.search()
A__ = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 82 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "openai/whisper-base"
__UpperCAmelCase : Union[str, Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCAmelCase : List[str] = "transcriber"
__UpperCAmelCase : Optional[Any] = WhisperProcessor
__UpperCAmelCase : str = WhisperForConditionalGeneration
__UpperCAmelCase : List[str] = ["audio"]
__UpperCAmelCase : Tuple = ["text"]
def _lowercase ( self : str, UpperCAmelCase__ : int ):
return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ):
return self.model.generate(inputs=UpperCAmelCase__ )
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ):
return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
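# Minimal usage sketch (an assumption based on the standard PipelineTool
# encode -> forward -> decode call flow; `audio` is a waveform or path that
# WhisperProcessor accepts):
#   tool = <the tool class above>()
#   transcription = tool(audio)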
| 17 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( lowercase ):
lowercase__ = """openai/whisper-base"""
lowercase__ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowercase__ = """transcriber"""
lowercase__ = WhisperProcessor
lowercase__ = WhisperForConditionalGeneration
lowercase__ = ["""audio"""]
lowercase__ = ["""text"""]
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ ,return_tensors='pt' ).input_features
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )[0]
| 83 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
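# Spherical linear interpolation (slerp): with theta the angle between the two
# vectors, slerp(t) = sin((1 - t)*theta)/sin(theta) * v0 + sin(t*theta)/sin(theta) * v1;
# when |cos(theta)| > DOT_THRESHOLD the vectors are near-parallel and a plain
# linear interpolation is used instead.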
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
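# For L2-normalized inputs, ||x - y|| = 2*sin(theta/2), so the chain
# norm -> /2 -> arcsin -> **2 -> *2 below evaluates theta**2 / 2: a squared
# angular distance between the two embedding directions.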
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
            vae=UpperCAmelCase__,
            text_encoder=UpperCAmelCase__,
            clip_model=UpperCAmelCase__,
            tokenizer=UpperCAmelCase__,
            unet=UpperCAmelCase__,
            scheduler=UpperCAmelCase__,
            feature_extractor=UpperCAmelCase__,
            coca_model=UpperCAmelCase__,
            coca_tokenizer=UpperCAmelCase__,
            coca_transform=UpperCAmelCase__,
        )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
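        # e.g. strength=0.6 with num_inference_steps=50 keeps the last 30
        # timesteps: the init latents are noised to that level and then denoised
        # from there, the standard img2img schedule truncation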
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
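                # the two chunked predictions are recombined as
                #   eps = eps_uncond + guidance_scale * (eps_text - eps_uncond);
                # guidance_scale == 1 recovers the purely conditional prediction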
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
| 17 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :jnp.ndarray
UpperCAmelCase_ :jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
UpperCAmelCase_ :int
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
UpperCAmelCase_ :jnp.dtype = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase_ :int = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels[i]
lowerCAmelCase_ :Optional[int] = self.block_out_channels[i + 1]
lowerCAmelCase_ :int = nn.Conv(
__A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :Optional[int] = blocks
lowerCAmelCase_ :int = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A ) -> Tuple:
lowerCAmelCase_ :Dict = self.conv_in(__A )
lowerCAmelCase_ :List[str] = nn.silu(__A )
for block in self.blocks:
lowerCAmelCase_ :Any = block(__A )
lowerCAmelCase_ :Optional[int] = nn.silu(__A )
lowerCAmelCase_ :List[Any] = self.conv_out(__A )
return embedding
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , A__ , A__ ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :int = 4
UpperCAmelCase_ :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ :Union[bool, Tuple[bool]] = False
UpperCAmelCase_ :Tuple[int] = (320, 640, 1280, 1280)
UpperCAmelCase_ :int = 2
UpperCAmelCase_ :Union[int, Tuple[int]] = 8
UpperCAmelCase_ :Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase_ :int = 1280
UpperCAmelCase_ :float = 0.0
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :jnp.dtype = jnp.floataa
UpperCAmelCase_ :bool = True
UpperCAmelCase_ :int = 0
UpperCAmelCase_ :str = "rgb"
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
def __lowerCAmelCase ( self , __A ) -> FrozenDict:
# init input tensors
lowerCAmelCase_ :Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase_ :Dict = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ :List[Any] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase_ :Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase_ :Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase_ :Optional[int] = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = jax.random.split(__A )
lowerCAmelCase_ :Optional[int] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__A , __A , __A , __A , __A )["params"]
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels
lowerCAmelCase_ :int = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase_ :Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase_ :int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase_ :Optional[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase_ :Optional[Any] = FlaxTimestepEmbedding(__A , dtype=self.dtype )
lowerCAmelCase_ :int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase_ :List[str] = self.only_cross_attention
if isinstance(__A , __A ):
lowerCAmelCase_ :List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A , __A ):
lowerCAmelCase_ :Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Dict = block_out_channels[0]
lowerCAmelCase_ :List[Any] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase_ :List[Any] = output_channel
lowerCAmelCase_ :List[str] = block_out_channels[i]
lowerCAmelCase_ :Tuple = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase_ :Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase_ :Optional[int] = FlaxDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__A )
for _ in range(self.layers_per_block ):
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
if not is_final_block:
lowerCAmelCase_ :str = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
lowerCAmelCase_ :List[Any] = down_blocks
lowerCAmelCase_ :Optional[Any] = controlnet_down_blocks
# mid
lowerCAmelCase_ :int = block_out_channels[-1]
lowerCAmelCase_ :List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase_ :Dict = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A , __A , __A , __A , __A = 1.0 , __A = True , __A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowerCAmelCase_ :Union[str, Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase_ :Optional[int] = jnp.flip(__A , axis=1 )
# 1. time
if not isinstance(__A , jnp.ndarray ):
lowerCAmelCase_ :List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ :str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ :Union[str, Any] = jnp.expand_dims(__A , 0 )
lowerCAmelCase_ :List[Any] = self.time_proj(__A )
lowerCAmelCase_ :Optional[Any] = self.time_embedding(__A )
# 2. pre-process
lowerCAmelCase_ :int = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[Any] = self.conv_in(__A )
lowerCAmelCase_ :Union[str, Any] = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[str] = self.controlnet_cond_embedding(__A )
sample += controlnet_cond
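        # the embedded conditioning image is added to the latent stream before
        # the down blocks, which is how ControlNet injects spatial conditioning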
# 3. down
lowerCAmelCase_ :Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
lowerCAmelCase_ , lowerCAmelCase_ :Any = down_block(__A , __A , __A , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase_ :int = self.mid_block(__A , __A , __A , deterministic=not train )
        # 5. controlnet blocks
lowerCAmelCase_ :Dict = ()
for down_block_res_sample, controlnet_block in zip(__A , self.controlnet_down_blocks ):
lowerCAmelCase_ :Union[str, Any] = controlnet_block(__A )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ :Optional[Any] = controlnet_down_block_res_samples
lowerCAmelCase_ :List[Any] = self.controlnet_mid_block(__A )
# 6. scaling
lowerCAmelCase_ :List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__A , mid_block_res_sample=__A )
| 84 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
__UpperCAmelCase : Tuple = XGLMConfig
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Union[str, Any] = "gelu"
def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = d_model
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = ffn_dim
__lowercase = activation_function
__lowercase = activation_dropout
__lowercase = attention_dropout
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = None
__lowercase = 0
__lowercase = 2
__lowercase = 1
def _lowercase ( self : Union[str, Any] ):
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def _lowercase ( self : Tuple ):
__lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = self.get_config()
__lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self : List[Any] ):
return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=UpperCAmelCase__,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=UpperCAmelCase__,
        )
def _lowercase ( self : Dict ):
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCAmelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def _lowercase ( self : Optional[Any] ):
__lowercase = TFXGLMModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 )
def _lowercase ( self : Any ):
self.config_tester.run_common_tests()
@slow
def _lowercase ( self : List[str] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _lowercase ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[Any] ):
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
__lowercase = tokenizer("Today is a nice day and", return_tensors="tf" )
__lowercase = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(":/CPU:0" ):
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] )
__lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
@slow
def _lowercase ( self : Dict ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = "left"
# use different length sentences to test batching
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ )
__lowercase = inputs["input_ids"]
__lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
| 17 | 0 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class _snake_case ( lowercase_ ):
def __init__( self , *a__ , **a__ ) -> Tuple:
'''simple docstring'''
super().__init__(*a__ , **a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(a__ )
snake_case_ = self.values[key]
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return (
sum(self.charge_factor - len(a__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
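    # each slot chains its values in a deque (separate chaining); super()'s
    # probing-based collision resolution is only invoked once a slot's chain
    # has grown to `charge_factor` entries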
def lowerCAmelCase__ ( self , a__ , a__=None ) -> str:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(a__ ) == 0
):
return key
return super()._collision_resolution(a__ , a__ )
| 85 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a = '__DUMMY_TRANSFORMERS_USER__'
_a = 'Dummy User'
_a = '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'
_a = 'https://hub-ci.huggingface.co'
_a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
_a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
_a = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _A ( UpperCamelCase_ : List[Any]) -> Tuple:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : int) -> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Dict:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]:
'''simple docstring'''
HfFolder.save_token(UpperCamelCase_)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def _A ( ) -> List[Any]:
'''simple docstring'''
return HfApi(endpoint=UpperCamelCase_)
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi) -> List[Any]:
'''simple docstring'''
__lowercase = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase_)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Dict) -> int:
'''simple docstring'''
def _cleanup_repo(UpperCamelCase_ : Optional[int]):
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Any:
'''simple docstring'''
@contextmanager
def _temporary_repo(UpperCamelCase_ : Any):
try:
yield repo_id
finally:
cleanup_repo(UpperCamelCase_)
return _temporary_repo
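# each session-scoped repo fixture below creates a private dataset repo once per
# test session, uploads a single payload file, yields the repo id to the tests,
# and then best-effort deletes the repo (HTTP and invalid-token errors during
# cleanup are deliberately swallowed)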
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
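# `_LazyModule` (registered at the bottom of this file) defers the heavy imports
# above until an attribute is first accessed; the try/except blocks below only
# register symbols whose backend (torch / tensorflow) is actually installed.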
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 86 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : int = "time_series_transformer"
__UpperCAmelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
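# A minimal usage sketch (illustrative, not part of the original module). With the
# defaults restored above, cardinality falls back to [0], so the embedding dimensions
# sum to 0 and feature_size = input_size * len(lags_sequence) + input_size * 2 = 7 + 2.
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(prediction_length=24)
    assert config.context_length == 24  # falls back to prediction_length
    assert config.feature_size == 9  # 1 * 7 lags + 2 loc/scale features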
| 17 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
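    # Worked example (illustrative): with size = {"shortest_edge": 18, "longest_edge": 1333},
    # a 640x480 (w x h) image has w > h, so expected_height = 18 and
    # expected_width = int(18 * 640 / 480) = 24; the aspect ratio is preserved.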
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
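# A minimal usage sketch (illustrative; assumes torch and vision extras are installed):
# processor = DeformableDetrImageProcessor()
# inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
# print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])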
| 87 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, so layoutlmv2 should fail here
        # and the pipeline should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
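# A minimal usage sketch (illustrative; the model id and the exact output are examples only):
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))
# -> something like [{"score": ..., "answer": "us-001", "start": 16, "end": 16}]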
| 17 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**self.inputs_dict, **kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
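# A minimal usage sketch (illustrative): inside a model's config test one would write
# something like the following, where BertConfig stands in for any config class.
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()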
| 88 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (from fairseq)."""
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices
    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        return len(self.symbols)
    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
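# Quick sanity check (illustrative):
# rewrite_dict_keys({"le@@": 5, "er": 7, "<s>": 0}) returns
# {"le": 5, "er</w>": 7, "<s>": 0} -- BPE continuation pieces lose "@@",
# word-final pieces gain "</w>", and special tokens are kept verbatim.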
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"""Generating {biogpt_tokenizer_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# model
__lowercase = chkpt["model"]
# remove unneeded keys
__lowercase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase_, UpperCamelCase_)
__lowercase = list(model_state_dict.keys())
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight"):
__lowercase = model_state_dict.pop(UpperCamelCase_)
else:
__lowercase = model_state_dict.pop(UpperCamelCase_)
__lowercase = BioGptConfig.from_pretrained(UpperCamelCase_)
__lowercase = BioGptForCausalLM(UpperCamelCase_)
# check that it loads ok
model_new.load_state_dict(UpperCamelCase_)
# save
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
print(F"""Generating {pytorch_weights_dump_path}""")
torch.save(UpperCamelCase_, UpperCamelCase_)
print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
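# Example invocation (script name and paths are placeholders):
# python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#     --biogpt_checkpoint_path /path/to/biogpt_dump \
#     --pytorch_dump_folder_path /path/to/output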
| 17 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
return len(self.sp_model )
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
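# Round-trip sketch (illustrative): at encode time, jieba-segmented words are joined
# with spaces and self.translator maps " " -> "\u2582" and "\n" -> "\u2583"; _decode
# above reverses this, turning "\u2582" back into a space and "\u2583" into a newline.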
| 89 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge(self, u_node: int, v_node: int, weight: int):
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    """Doctest placeholder kept from the source; see the usage sketch below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
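# A minimal usage sketch (illustrative, not part of the original module):
# g = Graph(3)
# g.add_edge(0, 1, 1)
# g.add_edge(1, 2, 2)
# g.add_edge(0, 2, 3)
# g.boruvka()  # selects edges (0-1) and (1-2); total MST weight is 3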
| 17 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
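# Example invocation (script name and paths are placeholders):
# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/WavLM-Base.pt --pytorch_dump_folder_path /path/to/output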
| 90 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
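# Worked example (illustrative): 220 and 284 are amicable --
# sum_of_divisors(220) = 284 and sum_of_divisors(284) = 220, and since
# sum_of_divisors(220) != 220 both members are counted, so solution(10000)
# returns the sum of all amicable numbers below 10000 (31626, Project Euler 21).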
| 17 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _A (__a="" ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = torch.rand(12 , dtype=torch.floataa) - 0.5
SCREAMING_SNAKE_CASE_ : int = get_new_path(suffix='''.wav''')
sf.write(lowercase_ , lowercase_ , 16000)
SCREAMING_SNAKE_CASE_ : int = AgentAudio(lowercase_)
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4))
self.assertEqual(agent_type.to_string() , lowercase_)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    '''simple docstring'''
    def test_from_tensor(self):
        '''simple docstring'''
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        '''simple docstring'''
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        '''simple docstring'''
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    '''simple docstring'''
    def test_from_string(self):
        '''simple docstring'''
        string = 'Hey!'
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 91 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a = _symbol_database.Default()
_a = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a = None
_a = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a = 45
_a = 15_81
_a = 15_17
_a = 15_70
_a = 15_84
_a = 17_93
_a = 17_95
_a = 19_16
_a = 18_64
_a = 19_05
_a = 19_19
_a = 24_29
_a = 22_08
_a = 24_18
_a = 23_23
_a = 24_07
# @@protoc_insertion_point(module_scope)
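# Minimal usage sketch for the generated module (standard protobuf API; the model
# file path below is a placeholder):
#   from sentencepiece_model_pb2 import ModelProto
#   m = ModelProto()
#   m.ParseFromString(open("spiece.model", "rb").read())
#   print(m.trainer_spec.vocab_size)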
| 17 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [8_00, 13_33]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_30
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 13_20
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_00, 13_44]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str):
    if "backbone" in name:
        name = name.replace("backbone", "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn", "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm" )
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_00 if yolos_name != "yolos_ti" else 5_12
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt" )
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_slice_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_slice_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_slice_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1E-4 )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl" )
        model.push_to_hub(model_name, organization="hustvl" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
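# Hypothetical invocation sketch -- the script filename and the checkpoint path are
# placeholders, not taken from this file:
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small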
| 92 |
"""simple docstring"""
import baseaa
def baseaa_encode(string: str) -> bytes:
    '''simple docstring'''
    return baseaa.baaencode(string.encode("utf-8"))
def baseaa_decode(encoded: bytes) -> str:
    '''simple docstring'''
    return baseaa.baadecode(encoded).decode("utf-8")
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
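    # Expected output, assuming baseaa/baaencode wrap base64.b64encode as the
    # import suggests:
    #   b'SGVsbG8gV29ybGQh'
    #   Hello World!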
| 17 | 0 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests( unittest.TestCase ):
    def get_file_format(self, seed, shape):
        """simple docstring"""
        return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape] )}.npy'''
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape) ), dtype=dtype )
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = '''bf16''' if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder='''unet''', dtype=dtype, revision=revision )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 7_68), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape) ), dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''', fp16=True )
        latents = self.get_latents(seed, fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True )
        sample = model.apply(
            {'''params''': params}, latents, jnp.array(timestep, dtype=jnp.int32 ), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''', fp16=True )
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 10_24), fp16=True )
        sample = model.apply(
            {'''params''': params}, latents, jnp.array(timestep, dtype=jnp.int32 ), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2 )
| 93 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
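# Worked example of the min/max selection above:
# merge_sort([5, 1, 4, 2, 3]) builds start=[1, 2] and end=[5, 4], then returns
# [1, 2] + [3] + [4, 5] == [1, 2, 3, 4, 5].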
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
    '''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_text_dual_encoder'''] = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_text_dual_encoder'''] = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_text_dual_encoder'''] = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
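# With the lazy module installed in sys.modules, an import such as
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel
# only pulls in the heavy torch-dependent module on first attribute access
# (standard _LazyModule behavior).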
| 94 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
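# Worked example: for [1, 2, 3, 4] the mean is 2.5 and the absolute deviations are
# [1.5, 0.5, 0.5, 1.5], so average_absolute_deviation([1, 2, 3, 4]) == 1.0.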
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0):
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int):  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list):
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_a(array: list, first_index: int, middle_index: int, last_index: int):
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int):
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list):
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array) ) )
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int):
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
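# Example (hand-checked): sort([4, 2, 6, 8, 1]) returns [1, 2, 4, 6, 8]; since the
# array length is below the size_threshold of 16, the call falls straight through
# to insertion_sort without recursing into quicksort or heap_sort.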
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma : """).strip()
    unsorted = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 95 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
"""simple docstring"""
    def __init__(self, parent, vocab_size=1_0_0, batch_size=1_3, image_size=3_0, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=3_7 )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs) )
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np" ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_9_6), dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_9_6, 8_1_9_2)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1E-2 ) )
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np" )
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array([-1.2_385, -1.0_987, -1.0_108] )
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np" )
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_1_8_4_1)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array([1.6_881, -0.2_787, 0.5_901] )
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )
| 17 | 0 |
"""simple docstring"""
from math import isqrt
def is_prime(number):
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1 ) )
def solution(max_prime = 10**6 ):
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin ):
    """simple docstring"""
    def setUp(self):
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True )
    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"] )
        self.assertEqual(result, "positive" )
    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"] )
        self.assertEqual(result, "positive" )
    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"] )
        self.assertEqual(result, "positive" )
    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
        self.assertEqual(result, "positive" )
| 17 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ['text']
    outputs = ['audio']
    def setup(self):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        '''simple docstring'''
        inputs = self.pre_processor(text=text, return_tensors='''pt''', truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''', split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 97 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe_1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe_2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe_3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe_4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    """simple docstring"""
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe_1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe_2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe_3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4 )
    @property
    def components(self):
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_" )}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 5_0, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 5_0, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 5_0, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 5_0, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 5_0, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
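# Hypothetical loading sketch for a community pipeline of this shape (the
# custom_pipeline name below is an assumption, not defined in this file):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison")
#   images = pipe("an astronaut riding a horse").images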
| 17 | 0 |
"""simple docstring"""
def generate_large_matrix():
    return [list(range(1_0_0_0 - i, -1_0_0_0 - i, -1 ) ) for i in range(1_0_0_0 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid):
    assert all(row == sorted(row, reverse=True ) for row in grid )
    assert all(list(col) == sorted(col, reverse=True ) for col in zip(*grid ) )
def find_negative_index(array):
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
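# Example: find_negative_index([4, 3, 2, -1]) == 3, and find_negative_index([1, 0]) == 2
# (no negatives, so the array length is returned).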
def count_negatives_binary_search(grid):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid) * len(grid[0] )) - total
def count_negatives_brute_force(grid):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid):
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'''{func}(grid=grid)''', setup=setup, number=5_0_0 )
        print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 98 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(seed) )
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
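# The scheduler-swap pattern exercised by the tests above, in isolation (an
# illustrative sketch, assuming `checkpoint` points at an ONNX upscaler export;
# any diffusers scheduler exposing `from_config` can be dropped in the same way):
#
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(checkpoint, provider="CPUExecutionProvider")
#   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
#   image = pipe(**inputs).images[0]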
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the real upscaler checkpoint."""
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 17 | 0 |
def partition(m: int) -> int:
    # memo[n][k] counts the partitions of n using parts of size at most k + 1.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
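# Illustrative check (added; not part of the original module): p(4) = 5 because
# 4 = 3+1 = 2+2 = 2+1+1 = 1+1+1+1, and p(7) = 15.
def _demo_partition() -> None:
    assert partition(4) == 5
    assert partition(7) == 15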
if __name__ == "__main__":
import sys
    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 99 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """CSV dataset builder; reads files with pandas and yields Arrow tables."""

    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
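# Minimal end-to-end usage sketch (added for illustration; assumes a local
# `data.csv` exists). Extra keyword arguments are captured by CsvConfig and
# forwarded to `pd.read_csv` through the `pd_read_csv_kwargs` property above:
#
#   from datasets import load_dataset
#   dset = load_dataset("csv", data_files="data.csv", sep=";")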
| 17 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
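# The cache-equivalence idea behind check_decoder_model_past_large_inputs,
# reduced to its core (an illustrative sketch, assuming `model` is any TF
# causal model and `input_ids`/`next_tokens` are int32 id tensors):
#
#   output, past = model(input_ids, use_cache=True).to_tuple()
#   full = model(tf.concat([input_ids, next_tokens], axis=-1))[0]
#   incremental = model(next_tokens, past_key_values=past)[0]
#   tf.debugging.assert_near(full[:, -next_tokens.shape[1]:], incremental, rtol=1e-3)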
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        # int32 matches the dtype produced by ids_tensor, so the concat below is valid
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
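# Note on test_batch_generation (added): `tokenizer.padding_side = "left"` is
# essential for decoder-only models like OPT. Generation continues from the
# last position of the sequence, so padding must go on the left; with right
# padding the model would be asked to continue from pad tokens, and batched
# outputs would diverge from the unbatched ones the test compares against.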
| 100 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n              Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n              Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n              Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n              Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n              Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n              Kern, Robert and Larson, Eric and Carey, C J and\n              Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n              {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n              Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n              Harris, Charles R. and Archibald, Anne M. and\n              Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n              {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n              Computing in Python}},\n    journal = {Nature Methods},\n    year = {2020},\n    volume = {17},\n    pages = {261--272},\n    adsurl = {https://rdcu.be/b08Wh},\n    doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17 | 0 |