Dataset schema:

| Column | Type | Range |
|---|---|---|
| code | string | lengths 86 – 54.5k |
| code_codestyle | int64 | 0 – 371 |
| style_context | string | lengths 87 – 49.2k |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
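
# Illustrative round trip (function names above are restored descriptively;
# the obfuscated dump did not preserve the originals):
# >>> base16_encode(b"Hello")
# '48656C6C6F'
# >>> base16_decode("48656C6C6F")
# b'Hello'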
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['pixel_values']

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int,
            data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        # Pad the bottom/right edges up to the next multiple of `size`.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None,
                   pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
                   **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
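
# Minimal usage sketch (the class name above is a reconstruction; the dump
# did not preserve it, so treat it as an assumption):
#
#   import numpy as np
#   processor = Swin2SRImageProcessor()
#   image = np.random.randint(0, 256, (3, 21, 30), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   # Height/width are padded up to the next multiple of pad_size (8):
#   # batch["pixel_values"][0].shape == (3, 24, 32)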
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
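# For the classic CLRS example network above, the program should print 23,
# the value of the maximum flow from vertex 0 to vertex 5.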
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
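
# Hedged usage sketch: PipelineTool instances are callable, so (assuming the
# class and attribute names restored above) a zero-shot call looks like:
#
#   classifier = TextClassificationTool()
#   classifier("This movie was absolutely wonderful.", labels=["positive", "negative"])
#   # -> "positive"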
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowercase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowercase__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
lowercase__ = BeautifulSoup(res.text, 'html.parser')
lowercase__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    '''simple docstring'''

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), UserWarning, )
        return fn(*args, **kwargs)

    return _inner_fn
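
# Illustrative use of the decorator above (hypothetical function name):
#
#   @experimental
#   def new_api():
#       return 42
#
#   new_api()  # emits: UserWarning: 'new_api' is experimental ...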
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
        self.assertIn(
            nested_simplify(output), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
] , )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
] , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timesformer'] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
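# With the lazy module in place, importing the package stays cheap; the heavy
# torch-backed symbols are only resolved on first attribute access, e.g.:
#
#   from transformers.models.timesformer import TimesformerConfig
#
# (Illustrative; the modeling classes additionally require torch to be installed.)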
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
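# Example (illustrative): binary_and(25, 32) == '0b000000', because
# 25 = 0b011001 and 32 = 0b100000 share no set bits.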
def longest_distance(graph) -> None:
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
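# For the DAG above this prints 5: the longest chain, e.g. 0 -> 2 -> 5 -> 6 -> 7,
# contains five vertices (distances here count vertices, starting at 1).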
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
    print(f'{ip} is a {valid_or_invalid} IP v4 address.')
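# Examples (the function keeps the original upper bound of 254 per octet):
#   is_ip_v4_address_valid("192.168.0.23")  -> True
#   is_ip_v4_address_valid("192.256.15.8")  -> False  (octet out of range)
#   is_ip_v4_address_valid("172.100.0.8.9") -> False  (too many octets)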
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)', sep_token='(SEP)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token='(CLS)', sep_token='(SEP)', do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
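    # Expected output: 0.375 -- the sets share 3 of the 8 elements in their union.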
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark():
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
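
# Quick check (illustrative): all three variants agree, e.g.
#   sum_of_digits(262144) == sum_of_digits_compact(262144) == 19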
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # `is_decoder=False` is an assumption: the dump replaced this literal with an undefined name.
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='VideoMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_skipped_common_case(self):  # placeholder name: the original test name was not preserved by the dump
        pass
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
'''simple docstring'''
def euclidean_distance_sqr(pointa, pointb):
    '''simple docstring'''
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2


def column_based_sort(array, column=0):
    '''simple docstring'''
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float('inf')):
    '''simple docstring'''
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float('inf')):
    '''simple docstring'''
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    '''simple docstring'''
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
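    # The closest pair here is (2, 3) and (3, 4); expected output:
    # Distance: 1.4142135623730951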
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase="" , __lowercase=None , __lowercase=None ) -> str:
A: str = 0
A: Dict = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
A: Tuple = ['\n'.join(lines[:index] )]
else:
A: Tuple = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
A: Tuple = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
A: List[Any] = [lines[index + 1]]
index += 1
else:
A: Union[str, Any] = []
else:
blocks.append('''\n'''.join(A__ ) )
A: List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCamelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
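# Assumed invocation (the file path is illustrative; only --check_only is defined above):
#
#   python custom_init_isort.py               # rewrite offending __init__.py files in place
#   python custom_init_isort.py --check_only  # raise instead of rewriting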
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        # The boolean scheduler arguments below are reconstructions: the dump
        # replaced these literals with undefined names.
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule='linear', beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='epsilon', thresholding=False, )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
snake_case__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
snake_case__ : List[str] = torch.from_numpy(np.array(__UpperCamelCase ) ).float() / 2_5_5.0
snake_case__ : Dict = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
snake_case__ : int = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
snake_case__ : List[Any] = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[int] = 'A robot, 4k photo'
snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ : Dict = pipeline(
image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , hint=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=100 , output_type='np' , )
snake_case__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
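
# Usage sketch (not part of the test suite): the same depth-controlnet flow as the
# slow test above, condensed. Model ids, step counts and the fp16/CUDA requirement
# are taken from that test; `hint` is a (1, 3, H, W) depth map scaled to [0, 1].
#
#   prior = KandinskyVaaPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe = KandinskyVaaControlnetPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
#   ).to("cuda")
#   image_emb, zero_emb = prior("A robot, 4k photo", negative_prompt="").to_tuple()
#   image = pipe(
#       image_embeds=image_emb, negative_image_embeds=zero_emb, hint=hint, output_type="np"
#   ).images[0]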
| 143 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_a : Tuple = logging.getLogger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = "sequence-classification"
def __init__( self , a__ ):
if type(a__ ) == dict:
_lowerCAmelCase : Optional[Any] = Namespace(**a__ )
_lowerCAmelCase : Tuple = glue_output_modes[hparams.task]
_lowerCAmelCase : str = glue_tasks_num_labels[hparams.task]
super().__init__(a__ , a__ , self.mode )
def __A ( self , **a__ ):
return self.model(**a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase : str = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_lowerCAmelCase : List[Any] = self(**a__ )
_lowerCAmelCase : Any = outputs[0]
_lowerCAmelCase : Dict = self.trainer.lr_schedulers[0]["""scheduler"""]
_lowerCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __A ( self ):
_lowerCAmelCase : List[str] = self.hparams
_lowerCAmelCase : Optional[Any] = processors[args.task]()
_lowerCAmelCase : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase : Dict = self._feature_file(a__ )
if os.path.exists(a__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , a__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
_lowerCAmelCase : Optional[int] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase : Union[str, Any] = convert_examples_to_features(
a__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , a__ )
torch.save(a__ , a__ )
def __A ( self , a__ , a__ , a__ = False ):
_lowerCAmelCase : int = """dev""" if mode == """test""" else mode
_lowerCAmelCase : Optional[Any] = self._feature_file(a__ )
logger.info("""Loading features from cached file %s""" , a__ )
_lowerCAmelCase : List[str] = torch.load(a__ )
_lowerCAmelCase : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase : Any = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase : List[Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase : int = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a__ , a__ , a__ , a__ ) , batch_size=a__ , shuffle=a__ , )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_lowerCAmelCase : Tuple = self(**a__ )
_lowerCAmelCase : Optional[int] = outputs[:2]
_lowerCAmelCase : str = logits.detach().cpu().numpy()
_lowerCAmelCase : int = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase : Union[str, Any] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase : str = np.argmax(a__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase : List[Any] = np.squeeze(a__ )
_lowerCAmelCase : int = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
_lowerCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase : List[Any] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , a__ , a__ )}
_lowerCAmelCase : Union[str, Any] = dict(results.items() )
_lowerCAmelCase : Union[str, Any] = results
return ret, preds_list, out_label_list
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = self._eval_end(a__ )
_lowerCAmelCase : Dict = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = self._eval_end(a__ )
_lowerCAmelCase : str = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __A ( a__ , a__ ):
BaseTransformer.add_model_specific_args(a__ , a__ )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=a__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=a__ , required=a__ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=a__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(_lowerCamelCase ,os.getcwd() )
_lowerCAmelCase : Optional[Any] = GLUETransformer.add_model_specific_args(_lowerCamelCase ,os.getcwd() )
_lowerCAmelCase : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase : str = os.path.join(
"""./results""" ,f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" ,)
os.makedirs(args.output_dir )
_lowerCAmelCase : Union[str, Any] = GLUETransformer(_lowerCamelCase )
_lowerCAmelCase : List[Any] = generic_train(_lowerCamelCase ,_lowerCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir ,"""checkpoint-epoch=*.ckpt""" ) ,recursive=_lowerCamelCase ) )
_lowerCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowerCamelCase )
if __name__ == "__main__":
main()
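
# Usage sketch (hypothetical script name and paths; `--model_name_or_path`,
# `--data_dir`, `--output_dir` and `--do_predict` are added by `add_generic_args`
# and `BaseTransformer.add_model_specific_args`, which live in `lightning_base`):
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc --do_predict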
| 350 |
from manim import *


class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU block: two columns of six memory cells with a label
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU block
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model block
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        # Loaded-checkpoint block
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))

        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
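
# To render the scene (standard Manim Community CLI; the script filename is
# hypothetical):
#   manim -pql stage_3.py Stage3
# -p previews the output and -ql renders at low quality for fast iteration.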
| 126 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
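
# Usage sketch (hypothetical test-file path, following the transformers test layout):
#   python -m pytest tests/models/swinv2/test_modeling_swinv2.py
# The @slow-decorated tests (hub download + integration check) only run when the
# RUN_SLOW=1 environment variable is set, per the transformers testing convention.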
| 244 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set node storing the data, a parent pointer and a rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure with path compression and union by rank
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree to the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily keep every edge
        # that joins two different components (tracked with the disjoint set above)
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
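

if __name__ == "__main__":
    # Small usage sketch: node labels and weights below are illustrative only.
    # Kruskal picks edges a-b (1) and b-c (2), skips a-c (cycle), then adds c-d (4).
    demo_graph = GraphUndirectedWeighted[str]()
    demo_graph.add_edge("a", "b", 1)
    demo_graph.add_edge("b", "c", 2)
    demo_graph.add_edge("a", "c", 3)
    demo_graph.add_edge("c", "d", 4)
    mst = demo_graph.kruskal()
    # each undirected edge is stored twice in `connections`, hence the division by 2
    total_weight = sum(w for u in mst.connections for w in mst.connections[u].values()) // 2
    print(f"MST total weight: {total_weight}")  # -> 7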
| 244 | 1 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
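
# Quick sanity checks for the 6k +/- 1 trial division above (verifiable by hand):
#   is_prime(2) -> True, is_prime(3) -> True, is_prime(25) -> False, is_prime(29) -> True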


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    # handle 2 and 3 first so the main loop can step over even numbers
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
    print(f"{solution() = }")
| 6 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    # identical to BertTokenizerFast, with DPR context-encoder vocab/config defaults
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    # identical to BertTokenizerFast, with DPR question-encoder vocab/config defaults
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        # rank passages by relevance, then extract the best answer spans from each
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # score every (start, end) pair, keep the top non-overlapping intervals
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
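
# Usage sketch (illustrative strings; `reader_outputs` would come from a DPRReader
# model forward pass, which lives in the modeling file, not here):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway."],
#       return_tensors="pt",
#   )
#   # best_spans = tokenizer.decode_best_spans(encoded, reader_outputs)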
| 6 | 1 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching `input_string` against
    `pattern`, where "." matches any single character and "*" matches zero or
    more of the preceding element.
    """

    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
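
# A few illustrative cases for the DP above (each checkable by hand):
#   match_pattern("aab", "c*a*b")  -> True   ("c*" matches zero c's, "a*" matches "aa")
#   match_pattern("aaa", "aa")     -> False
#   match_pattern("aaa", "a.a")    -> True   ("." matches any single character)
#   match_pattern("dabc", "*abc")  -> False  (a leading "*" has no preceding element)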
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
| 138 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    # the target key names below follow the naming scheme used by rename_key()
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # same split for the text encoder's attention layers
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    # we verify the converted model on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak the model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result on an example image and two candidate captions
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
__snake_case : int =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__snake_case : Tuple =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
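
# Usage sketch (checkpoint filename and script name are hypothetical; the two model
# names are those validated above):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc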
| 129 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
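
# Usage sketch for the tokenizer class defined below (checkpoint id taken from the
# pretrained maps above; requires network access to the Hub):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   input_ids = tokenizer("こんにちは、世界")["input_ids"]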
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
return vocab_file, emoji_file
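# --- Hedged usage sketch ---
# A minimal round trip with the released checkpoint named in the maps above;
# network access and the public `transformers` entry point are assumed:
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   print(tokenizer.decode(ids))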
class SubWordJapaneseTokenizer(object):
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
    def __len__( self ):
        return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
    def tokenize( self , text , clean=False ):
        text = text.replace(' ' , '<SP>' )
        text = text.replace('\u3000' , '<SP>' )  # full-width space
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC2_A1 and c <= 0xC2_BF)
                    or (c >= 0xC7_80 and c <= 0xC7_83)
                    or (c >= 0xCA_B9 and c <= 0xCB_BF)
                    or (c >= 0xCC_80 and c <= 0xCD_A2)
                ):
                    return True
            return False
        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE2_80_80 and c <= 0xE2_B0_7F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
        text = "".join(words )
return text | 363 |
'''simple docstring'''
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip('s' )
    to_sanitized = to_type.lower().strip('s' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
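# A small hedged check of length_conversion; the expected values follow
# directly from the exponent table above (call manually, not run on import):
def _example_length_conversion() -> None:
    assert length_conversion(4, "meter", "kilometer") == 0.004
    assert length_conversion(1, "kilometer", "meter") == 1000
    assert length_conversion(3, "Mm", "km") == 3000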
if __name__ == "__main__":
from doctest import testmod
testmod() | 96 | 0 |
'''simple docstring'''
from pathlib import Path
import cv2 as cva  # OpenCV, aliased so the cva.* calls below keep working
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` by the affine map taking the three points `pt1` onto `pt2`."""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
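# Hedged sanity sketch: an affine map is fully determined by three point
# correspondences, so mapping three points onto themselves must yield the
# 2x3 identity matrix (only the imports already present above are used):
def _example_identity_transform() -> None:
    pts = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
    matrix = cva.getAffineTransform(pts, pts)
    assert np.allclose(matrix, np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]))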
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 168 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet_upscale( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ):
'''simple docstring'''
__A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : int = self.dummy_cond_unet_upscale
__A : Union[str, Any] = DDPMScheduler()
__A : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
__A : int = self.dummy_vae
__A : int = self.dummy_text_encoder
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Dict = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = '''A painting of a squirrel eating a burger'''
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : List[str] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
__A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : str = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__lowerCamelCase , )[0]
__A : Tuple = image[0, -3:, -3:, -1]
__A : int = image_from_tuple[0, -3:, -3:, -1]
__A : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__A : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch( self ):
'''simple docstring'''
__A : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.dummy_cond_unet_upscale
__A : List[str] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[int] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Any = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Any = '''A painting of a squirrel eating a burger'''
__A : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_upscale_fp16( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_cond_unet_upscale
__A : int = DDPMScheduler()
__A : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[Any] = self.dummy_vae
__A : List[str] = self.dummy_text_encoder
__A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__A : Union[str, Any] = unet.half()
__A : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__A : Optional[Any] = torch.manual_seed(0 )
__A : Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
__A : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):
'''simple docstring'''
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):
'''simple docstring'''
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
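    # --- Hedged usage sketch (mirrors the slow tests above; a CUDA GPU and
    # network access are assumed) ---
    #   pipe = StableDiffusionUpscalePipeline.from_pretrained(
    #       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    #   ).to("cuda")
    #   upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]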
| 179 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
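# --- Hedged usage sketch (using the upstream name BridgeTowerProcessor for
# the class above; the checkpoint id is an assumption based on the released
# BridgeTower weights, and `image` is any PIL image) ---
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="two cats sleeping on a couch", return_tensors="pt")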
| 369 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 242 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
 - **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score} | 126 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "focalnet"
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
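# --- Hedged usage sketch (using the upstream name FocalNetConfig for the
# class above) ---
#   config = FocalNetConfig(out_features=["stage2", "stage4"])
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]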
| 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the Amazon search results for `product` into a dataframe of
    titles, links, prices, ratings, MRPs and discounts."""
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # replace empty price cells with a single-space placeholder
        data_frame.loc[data_frame["Current Price of the product"] == "" , "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "" , "MRP of the product"] = " "
    data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
return number
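# A quick hedged check of the helpers above; the values follow from the
# definitions (2, 3, 5, 7, 11, 13 are the first six primes):
def _example_nth_prime() -> None:
    assert is_prime(13)
    assert not is_prime(27)
    assert solution(6) == 13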
if __name__ == "__main__":
print(F"{solution() = }") | 6 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path, articles: list) -> None:
    content = '\n'.join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
class __A ( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =AutoTokenizer.from_pretrained(_A )
lowerCamelCase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase =max(len(tokenizer.encode(_A ) ) for a in ARTICLES )
lowerCamelCase =max(len(tokenizer.encode(_A ) ) for a in SUMMARIES )
lowerCamelCase =4
lowerCamelCase =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
        src_lang , tgt_lang ='ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
lowerCamelCase =SeqaSeqDataset(
_A , data_dir=_A , type_path="""train""" , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , )
lowerCamelCase =DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_A , _A )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCamelCase =shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =AutoTokenizer.from_pretrained(_A )
lowerCamelCase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase =max(len(tokenizer.encode(_A ) ) for a in ARTICLES )
lowerCamelCase =max(len(tokenizer.encode(_A ) ) for a in SUMMARIES )
lowerCamelCase =4
lowerCamelCase =LegacySeqaSeqDataset(
_A , data_dir=_A , type_path="""train""" , max_source_length=20 , max_target_length=_A , )
lowerCamelCase =DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _snake_case ( self ):
lowerCamelCase =AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowerCamelCase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCamelCase =tmp_dir.joinpath("""train.source""" ).open().readlines()
lowerCamelCase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_A , _A , 128 , _A )
lowerCamelCase ={x.name for x in tmp_dir.iterdir()}
lowerCamelCase ={x.name for x in save_dir.iterdir()}
lowerCamelCase =save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_A ) < len(_A )
assert len(_A ) == 1
assert len(packed_examples[0] ) == sum(len(_A ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def _snake_case ( self ):
if not FAIRSEQ_AVAILABLE:
return
lowerCamelCase =self._get_dataset(max_len=64 )
lowerCamelCase =64
lowerCamelCase =ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A )
lowerCamelCase =[len(_A ) for x in batch_sampler]
assert len(set(_A ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_A ) == len(_A ) # no dropped or added examples
lowerCamelCase =DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase =[]
lowerCamelCase =[]
for batch in data_loader:
            src_shape =batch['input_ids'].shape
            bs =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCamelCase =np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(_A )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_A )
assert num_src_per_batch[0] == max(_A )
if failures:
raise AssertionError(f"""too many tokens in {len(_A )} batches""" )
def _snake_case ( self ):
lowerCamelCase =self._get_dataset(max_len=512 )
lowerCamelCase =2
lowerCamelCase =ds.make_sortish_sampler(_A , shuffle=_A )
lowerCamelCase =DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase =DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A )
lowerCamelCase =tokenizer.pad_token_id
def count_pad_tokens(UpperCAmelCase_ , UpperCAmelCase_="input_ids" ):
return [batch[k].eq(_A ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_A , k="""labels""" ) ) < sum(count_pad_tokens(_A , k="""labels""" ) )
assert sum(count_pad_tokens(_A ) ) < sum(count_pad_tokens(_A ) )
assert len(_A ) == len(_A )
def _snake_case ( self , UpperCAmelCase_=1000 , UpperCAmelCase_=128 ):
if os.getenv("""USE_REAL_DATA""" , _A ):
lowerCamelCase ='examples/seq2seq/wmt_en_ro'
lowerCamelCase =max_len * 2 * 64
if not Path(_A ).joinpath("""train.len""" ).exists():
save_len_file(_A , _A )
else:
lowerCamelCase ='examples/seq2seq/test_data/wmt_en_ro'
lowerCamelCase =max_len * 4
save_len_file(_A , _A )
lowerCamelCase =AutoTokenizer.from_pretrained(_A )
lowerCamelCase =SeqaSeqDataset(
_A , data_dir=_A , type_path="""train""" , max_source_length=_A , max_target_length=_A , n_obs=_A , )
return ds, max_tokens, tokenizer
def _snake_case ( self ):
lowerCamelCase =self._get_dataset()
lowerCamelCase =set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=0 , add_extra_examples=_A ) )
lowerCamelCase =set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=1 , add_extra_examples=_A ) )
assert idsa.intersection(_A ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =AutoTokenizer.from_pretrained(_A , use_fast=_A )
if tok_name == MBART_TINY:
lowerCamelCase =SeqaSeqDataset(
_A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowerCamelCase =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCamelCase =SeqaSeqDataset(
_A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowerCamelCase =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_A ) == 1 if tok_name == BART_TINY else len(_A ) == 0
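# A hedged sketch of the same dataset machinery outside the test harness; it
# mirrors the calls used in the tests above (the local `utils` module and the
# train.source / train.target layout written by make_test_data_dir assumed):
def _example_seq2seq_loader(tmp_dir: str) -> DataLoader:
    tokenizer = AutoTokenizer.from_pretrained(BART_TINY)
    ds = SeqaSeqDataset(
        tokenizer,
        data_dir=make_test_data_dir(tmp_dir=tmp_dir),
        type_path="train",
        max_source_length=4,
        max_target_length=8,
    )
    return DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)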
| 350 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
return filt
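# A hedged end-to-end sketch: a 1 kHz low-pass for 48 kHz audio, processing a
# few samples one at a time (IIRFilter.process is assumed to take a single
# sample, matching its use elsewhere in this package):
def _example_lowpass() -> None:
    filt = make_lowpass(1000, 48000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    filtered = [filt.process(sample) for sample in samples]
    assert len(filtered) == len(samples)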
| 262 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
    def __init__( self , pretrained_name="sayef/fsner-bert-base-uncased" ):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_name , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q , S , T=1 ):
        return self.softmax(T * self.cos(q , S ) )
    def forward( self , W_query , W_supports ):
        """Find scores of each token being the start or end token of an entity."""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # encode query and supports with the shared BERT backbone
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
return p_starts, p_ends
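

# Usage sketch (call shape only): in the upstream fsner project a dedicated
# tokenizer builds the query/support batches and adds the "sizes",
# "start_token_id" and "end_token_id" entries consumed by forward(), roughly:
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query=query_batch, W_supports=support_batch)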
| 77 |
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
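

# A matching one-shot sender, sketched under the same assumptions as the client
# above (hard-coded 12312 port, 1024-byte chunks). This helper is not part of
# the original script and the filename is an arbitrary example.
def serve_file_once(filename="File_to_send.txt"):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    server.close()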
if __name__ == "__main__":
    main()
| 96 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
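

# To exercise just this suite locally (file name assumed from the usual
# transformers test layout):
#
#     python -m pytest tests/models/deberta/test_tokenization_deberta.py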
| 366 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    filepath = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(filepath).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
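
    # Example invocation (script file name assumed from the usual transformers
    # layout; the URL is one of the two choices registered above):
    #
    #   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #       --pytorch_dump_folder_path ./table-transformer-detection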
| 67 | 0 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
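

# Quick sanity check of the helpers above: 1009 is prime and 1001 = 7 * 11 * 13
# is caught by the trial-division loop, so neither assert depends on the random
# Rabin-Miller witnesses (which never reject a true prime).
assert is_prime_low_num(1_009)
assert not is_prime_low_num(1_001)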
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 77 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
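
    # Example invocation (the checkpoint name must be one of
    # ACCEPTABLE_CHECKPOINTS above; script file name assumed):
    #
    #   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
    #       nlvr2_fine_tuned.th ./visualbert-nlvr2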
| 319 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
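

# Minimal sketch: instantiate the default config and read the ONNX export spec
# declared above (class names as fixed in this file; runs only as a script).
if __name__ == "__main__":
    config = Data2VecVisionConfig()
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(onnx_config.inputs)  # pixel_values with dynamic batch/channel/spatial axes
    print(onnx_config.atol_for_validation)  # 1e-4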
| 10 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
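

# Minimal sketch: the default config and the stage layout it derives above
# (runs only as a script).
if __name__ == "__main__":
    config = FocalNetConfig()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']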
| 1 |
'''simple docstring'''
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] in place using the (deliberately inefficient) slowsort scheme."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
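    # Example run (slowsort mutates the list in place; values are arbitrary):
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]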
| 1 | 1 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
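    # Example conversion; the factor comes straight from METRIC_CONVERSION above.
    print(volume_conversion(1, "cubicmeter", "gallon"))  # ~264.172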
| 361 |
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
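    # Example query: the trailing space marks a complete word in this trie
    # encoding, so the only match for "dog" is "dog ".
    assert autocomplete_using_trie("dog") == ("dog ",)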
| 302 | 0 |
"""simple docstring"""
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
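

# Spot-check against the start of the Catalan sequence 1, 1, 2, 5, 14, ...
# (the helper above is 1-indexed, so catalan(5) is the fifth entry):
assert [catalan(i) for i in range(1, 6)] == [1, 1, 2, 5, 14]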
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 81 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 262 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
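

# Minimal sketch: the default video settings carried by this config (runs only
# as a script).
if __name__ == "__main__":
    config = VivitConfig()
    print(config.num_frames, config.tubelet_size)  # 32 [2, 16, 16]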
| 302 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Convad(
in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,)
lowercase__ : Tuple = nn.BatchNormad(_snake_case )
lowercase__ : List[str] = nn.ReLU()
def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.conv(_snake_case )
lowercase__ : List[str] = self.batch_norm(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return output
class UperNetPyramidPoolingBlock(nn.Module):
def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = [
nn.AdaptiveAvgPoolad(_snake_case ),
UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Any = input
for layer in self.layers:
lowercase__ : int = layer(_snake_case )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Optional[Any] = in_channels
lowercase__ : Optional[Any] = channels
lowercase__ : int = []
for i, pool_scale in enumerate(_snake_case ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]:
"""simple docstring"""
lowercase__ : int = []
for ppm in self.blocks:
lowercase__ : Any = ppm(_snake_case )
lowercase__ : int = nn.functional.interpolate(
_snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class UperNetHead(nn.Module):
def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase__ : str = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Optional[Any] = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
lowercase__ : Dict = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
lowercase__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
lowercase__ : Any = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 )
lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = inputs[-1]
lowercase__ : Optional[int] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 )
lowercase__ : List[str] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class UperNetFCNHead(nn.Module):
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
| 302 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    # Pairs a CLIP image processor with an XLM-R tokenizer (the AltCLIP setup).
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
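# Illustrative usage sketch (the checkpoint name and `image` variable are assumptions, not from this file):
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `inputs` then carries `input_ids`, `attention_mask` and `pixel_values` in a single encoding.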
| 103 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 67 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
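# Illustrative call (the arrays are made up, not from the source):
# import numpy as np
# glue_compute_metrics("mrpc", np.array([1, 0, 1]), np.array([1, 1, 1]))
# -> {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}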
| 269 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 269 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 10 |
class CircularQueue:
    """Fixed-size circular queue backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
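# Quick sanity check (illustrative only):
# q = CircularQueue(3)
# q.enqueue(1).enqueue(2)        # enqueue returns self, so calls can be chained
# assert q.first() == 1 and q.dequeue() == 1 and len(q) == 1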
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 171 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
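# Examples: twin_prime(5) -> 7 (5 and 7 are twin primes); twin_prime(4) -> -1 (4 is not prime).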
| 4 |
class Node:
    # Binary search tree node
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to `res`
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the array, then read it back in order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
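# Expected output: [1, 2, 3, 9, 10, 13, 14]. Average cost is O(n log n); the worst case is O(n^2)
# for already-sorted input, since the unbalanced BST degenerates into a linked list.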
| 302 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
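# Why this equality holds: DDPM and DDIM share the same forward (noising) process, so with identical
# seeds, schedules and batches, `add_noise` and therefore each training step must produce identical tensors.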
| 353 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
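# Worked example (made up, not from the source): arrival_time = [0, 1, 2], burst_time = [4, 2, 1].
# P1 runs at t=0 and is preempted by P2 at t=1; P2 finishes at t=3, P3 at t=4 and P1 at t=7,
# giving waiting_time = [3, 0, 1] and turn-around times [7, 2, 2].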
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turn-around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 88 | 0 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
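# Example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) -> 7, via the path 1 -> 3 -> 1 -> 1 -> 1.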
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    # Placeholder class name: the model-specific name was obfuscated in the source.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
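# Illustrative preprocessing call (PIL image and placeholder class name assumed, not from the source):
# from PIL import Image
# image = Image.new("RGB", (640, 480))
# batch = ImageProcessor()(images=image, return_tensors="np")
# batch["pixel_values"].shape == (1, 3, 224, 224) after resize + center crop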
| 302 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        Coefficients are ordered from the constant term upwards, i.e. coefficients[i] multiplies x**i.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
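# Illustrative usage:
# p = Polynomial(2, [1, 0, 3])     # coefficients from x^0 upwards, i.e. 3x^2 + 1
# str(p)              == "3x^2 + 1"
# p.evaluate(2)       == 13
# str(p.derivative()) == "6x"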
| 358 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
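# Examples: bin_to_octal("1111") -> "17", bin_to_octal("101010") -> "52".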
if __name__ == "__main__":
from doctest import testmod
testmod()
| 191 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
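# Illustrative composition of a full config from its two sub-configs (values are library defaults):
# text_config = OwlViTTextConfig().to_dict()
# vision_config = OwlViTVisionConfig().to_dict()
# config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)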
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 269 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 269 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 275 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Map a requested image size to the MoVQ latent grid size, rounding up."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
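# Worked example: get_new_h_w(768, 768, 8) -> (96, 96), i.e. a 768x768 image
# maps to a 96x96 latent grid (768 / 8); a non-divisible size such as 769 is
# rounded up to the next full block: get_new_h_w(769, 769, 8) -> (104, 104).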
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
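    # e.g. with 4 entries in block_out_channels the MoVQ decoder upsamples
    # 2**3 = 8x, so generated images are 8x larger than the latent grid per side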
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
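    # Scaling by scheduler.init_noise_sigma puts the initial noise at the
    # variance the scheduler expects at its first timestep (1.0 for DDPM/DDIM).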
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
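    # The concatenated batch returned above is split back apart in __call__,
    # where noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # recombines the two halves (standard classifier-free guidance).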
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 275 | 1 |
"""simple docstring"""
from collections import deque
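# Tarjan's algorithm: a single depth-first traversal assigns each vertex a
# discovery index and a low-link value; a vertex whose low-link equals its own
# index roots a strongly connected component. Runs in O(V + E).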
def tarjan(g):
    """Return the strongly connected components of the directed graph `g`."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n, edges):
    """Build an adjacency list for `n` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 171 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]
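# e.g. signature("tops") == "opst"; "tops", "stop" and "spot" all share that
# signature, so anagram("tops") returns all three (given they occur in words.txt).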
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 350 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered sequences of elements from `array` that sum to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
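# Worked example: combination_sum_iv(3, [1, 2, 5], 5) == 9: the nine ordered
# sequences are 1+1+1+1+1, the 4 orderings of 1+1+1+2, the 3 orderings of
# 1+2+2, and 5 on its own.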
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as above, memoized on the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target)) | 317 | 0 |
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, use_labels=True, is_training=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 4 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 88 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 189 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
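# e.g. rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
# returns "encoder.layers.0.self_attn.q_proj.weight"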
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 189 | 1 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
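# e.g. print_combination([10, 20, 30, 40, 50], 5, 3) prints the C(5, 3) = 10
# size-3 combinations, one per line, starting with "10 20 30".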
if __name__ == "__main__":
# Driver code to check the function above
_snake_case = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 26 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 191 | 0 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row; each row is derived from the one above."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, exploiting left/right symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Benchmark both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 80 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 80 | 1 |
def upper(word: str) -> str:
    """Convert a string to ASCII uppercase, e.g. upper("wow") returns "WOW"."""
    # a lowercase ASCII letter sits exactly 32 code points above its uppercase form
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 275 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
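# Background for the two layouts above: atom37 is a fixed 37-slot encoding in
# which every possible heavy-atom name gets the same slot for all residue
# types, while atom14 is a dense per-residue encoding (no standard amino acid
# has more than 14 heavy atoms); the tensors built here translate between them.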
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 275 | 1 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
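# The rolling update above removes the leading character's contribution and
# appends the next one in O(1), so the expected running time is linear in
# len(text); the explicit slice comparison guards against hash collisions.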
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 44 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x times the standard normal CDF of x."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based GELU approximation (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with output clipped to [-10, 10], for quantization-friendly ranges."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
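# GLU (Dauphin et al., 2016) splits the input in two along `axis` and gates
# one half with the sigmoid of the other, mapping a (..., 2d) tensor to (..., d).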
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}") | 44 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class lowercase_ ( a__ ):
__UpperCAmelCase = 'blip_text_model'
    def __init__( self , vocab_size=30524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=30522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCamelCase__ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig(PretrainedConfig):
    model_type = 'blip_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCamelCase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig(PretrainedConfig):
    model_type = 'blip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , image_text_hidden_size=256 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
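
A short sketch of how the three configs compose; with no arguments the sub-configs fall back to the defaults shown above.

text_config = BlipTextConfig()      # defaults: vocab_size=30524, hidden_size=768, ...
vision_config = BlipVisionConfig()  # defaults: hidden_size=768, image_size=384, ...
config = BlipConfig.from_text_vision_configs(text_config, vision_config)
print(config.model_type)                        # 'blip'
print(config.text_config.encoder_hidden_size)   # 768, tied to the vision hidden size in __init__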
| 80 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__( self , image_processor , feature_extractor ):
        """simple docstring"""
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        """Forward the `images` input to TvltImageProcessor and the `audio` input to TvltFeatureExtractor."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names( self ):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
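
A hedged usage sketch; the checkpoint name and input shapes below are assumptions for illustration, not tested values.

# from transformers import TvltProcessor
# import numpy as np
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint name
# audio = list(np.random.randn(10_000))          # dummy mono waveform
# images = list(np.random.rand(8, 3, 224, 224))  # dummy 8-frame video clip (assumed layout)
# inputs = processor(images=images, audio=audio, sampling_rate=44_100)
# print(inputs.keys())  # pixel values / audio values plus their masks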
| 317 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
        return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def lowercase ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('Does not support attention outputs' )
def lowercase ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowercase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase ( self : Optional[Any] ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase ( self : str ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowercase ( self : int ) -> str:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowercase ( self : str ) -> Dict:
pass
@unittest.skip('ESMFold only has one output format.' )
def lowercase ( self : Any ) -> Dict:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowercase ( self : List[Any] ) -> List[str]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowercase ( self : int ) -> Dict:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowercase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase ( self : Any ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowercase ( self : Union[str, Any] ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : List[str] ) -> Optional[int]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> Dict:
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        expected_slice = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
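
These tests run under pytest like any other transformers model suite; the path below assumes the standard repository layout.

# python -m pytest tests/models/esm/test_modeling_esmfold.py -k "test_model" -v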
| 207 |
from functools import lru_cache
@lru_cache
def factorial( num: int ) -> int:
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
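

# Quick demonstration of the memoisation; `cache_info()` is provided by functools.lru_cache:
#
#   factorial(10)           # -> 3628800, caches 1! through 10! (10 misses)
#   factorial(12)           # -> 479001600, only 12 and 11 are new (1 hit, 2 misses)
#   factorial.cache_info()  # CacheInfo(hits=1, misses=12, maxsize=128, currsize=12)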
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str =logging.get_logger(__name__)
lowerCamelCase : Optional[int] ={
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig ):
    model_type = '''realm'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 189 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCamelCase : int =TypeVar('''T''')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int ):
        '''Create an empty store; capacity is `n`, or unbounded when `n` is 0.'''
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ):
        '''Look up `x`, evicting the least recently used key if the cache is full.'''
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ):
        '''Print all cache keys, most recently used first.'''
        for k in self.dq_store:
            print(k )

    def __repr__( self ):
        '''simple docstring'''
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : LRUCache[str | int] =LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 189 | 1 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to a base between 2 and 36 and return it as a string."""
    if isinstance(num, float):
        raise TypeError('''int() can\'t convert non-string with explicit base''')
    if num < 0:
        raise ValueError('''parameter must be positive int''')
    if isinstance(num, str) or isinstance(base, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if isinstance(base, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if base in (0, 1):
        raise ValueError('''base must be >= 2''')
    if base > 36:
        raise ValueError('''base must be <= 36''')
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to letters via ALPHABET_VALUES ("10" -> "A", ...)
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 40 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs( self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed( self ):
        random.seed(self.seed )
        tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''mems''': mems_1, '''labels''': lm_labels}
        _ , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def lowercase ( self : str ):
self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def lowercase ( self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def lowercase ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def lowercase ( self : Optional[Any] ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowercase ( self : int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowercase ( self : int ):
pass
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
_snake_case = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_snake_case = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_snake_case = model.generate(_lowerCamelCase , max_length=200 , do_sample=_lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
| 40 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(R'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(R'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(R'(_{2,})')
_split_re = R'^\w+(\.\w+)*$'
_INVALID_WINDOWS_CHARACTERS_IN_PATH = R'<>:/\|?*'
def camelcase_to_snakecase(name):
    '''Convert camel-case string to snake-case.'''
    name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
    name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
    return name.lower()
def snakecase_to_camelcase(name):
    '''Convert snake-case string to camel-case string.'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name(name):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
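

# Example round-trip of the helpers above:
#
#   camelcase_to_snakecase("SomeDatasetName")    # -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")  # -> "SomeDatasetName"
#   filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#   # -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow'] (POSIX paths)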
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type: str = field(default='Image' , init=False , repr=False )
def __call__( self ):
return self.pa_type
    def encode_example( self , value ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self , value , token_per_repo_id=None ):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("::" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage( self , storage ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ):
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ) -> bytes:
    '''Convert a PIL Image to bytes, using its native format when Pillow can both open and save it.'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ) -> dict:
    '''simple docstring'''
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ) -> dict:
    '''Encode a numpy array as image bytes, downcasting the dtype when Pillow requires it.'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dest_dtype = None
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ) -> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
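

# A sketch of the encode/decode round trip (assumes Pillow and numpy are installed):
#
#   feature = Image()
#   arr = np.zeros((32, 32, 3), dtype=np.uint8)
#   encoded = feature.encode_example(arr)       # {"path": None, "bytes": b"\x89PNG..."}
#   pil_image = feature.decode_example(encoded) # PIL.Image.Image of size 32x32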
| 80 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , learnable , hidden_size = None , length = None ):
        """simple docstring"""
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline(DiffusionPipeline ):
'''simple docstring'''
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        """simple docstring"""
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [""""""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt , num_inference_steps = 100 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ):
        """simple docstring"""
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(prompt )}' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
                    F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0 , truncation_rate ):
        """Zero out (in log space) all classes outside the smallest set whose total probability exceeds `truncation_rate`."""
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
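
A numeric sketch of `truncate` with `truncation_rate=0.9`, using the method just defined:

#   log_p = torch.log(torch.tensor([[[0.50], [0.30], [0.15], [0.05]]]))  # (batch, classes, pixels)
#   pipe.truncate(log_p, 0.9)
#   # cumulative sorted probs are [0.50, 0.80, 0.95, 1.00]; the mask is shifted by one,
#   # so the 0.50, 0.30 and 0.15 classes survive and the 0.05 class is set to -inf (log 0).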
| 364 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
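
Example usage, as a sketch; it assumes a diffusers install and the `google/ddpm-cifar10-32` checkpoint, whose DDPM weights this pipeline loads while swapping in a DDIMScheduler.

# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
# image.save("ddim_sample.png")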
| 229 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
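
A small sketch of selecting backbone outputs with this config:

# config = BitConfig(out_features=["stage2", "stage4"])
# config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] with the default 4-stage depths
# config.out_features  # ['stage2', 'stage4']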
| 44 |
"""simple docstring"""
def solution( length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
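

# Sanity check against the Project Euler 114 statement: a row of length 7
# admits exactly 17 arrangements when blocks must be at least 3 units long.
#
#   assert solution(7) == 17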
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44 | 1 |
'''simple docstring'''
def is_sum_subset(arr , required_sum ) -> bool:
    '''simple docstring'''
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
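    # Illustrative checks (example values, not from the original file):
    assert is_sum_subset([2, 4, 6, 8], 14) is True  # 6 + 8
    assert is_sum_subset([2, 4, 6, 8], 5) is False  # every element is even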
doctest.testmod() | 219 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 219 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = F"""{sampling_rate}"""
lowercase__ = '''1'''
lowercase__ = '''f32le'''
lowercase__ = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(lowerCamelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase__ = ffmpeg_process.communicate(lowerCamelCase_ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
lowercase__ = output_stream[0]
lowercase__ = np.frombuffer(lowerCamelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = "f32le" , ):
'''simple docstring'''
lowercase__ = F"""{sampling_rate}"""
lowercase__ = '''1'''
if format_for_conversion == "s16le":
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowercase__ = platform.system()
if system == "Linux":
lowercase__ = '''alsa'''
lowercase__ = '''default'''
elif system == "Darwin":
lowercase__ = '''avfoundation'''
lowercase__ = ''':0'''
elif system == "Windows":
lowercase__ = '''dshow'''
lowercase__ = '''default'''
lowercase__ = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ = _ffmpeg_stream(lowerCamelCase_ , lowerCamelCase_ )
for item in iterator:
yield item
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
lowercase__ = stream_chunk_s
else:
lowercase__ = chunk_length_s
lowercase__ = ffmpeg_microphone(lowerCamelCase_ , lowerCamelCase_ , format_for_conversion=lowerCamelCase_ )
if format_for_conversion == "s16le":
lowercase__ = np.intaa
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = np.floataa
lowercase__ = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
lowercase__ = chunk_length_s / 6
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase_ , (int, float) ):
lowercase__ = [stride_length_s, stride_length_s]
lowercase__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase__ = datetime.datetime.now()
lowercase__ = datetime.timedelta(seconds=lowerCamelCase_ )
for item in chunk_bytes_iter(lowerCamelCase_ , lowerCamelCase_ , stride=(stride_left, stride_right) , stream=lowerCamelCase_ ):
# Put everything back in numpy scale
lowercase__ = np.frombuffer(item['''raw'''] , dtype=lowerCamelCase_ )
lowercase__ = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
lowercase__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    '''Reads raw bytes from `iterator` and yields exactly `chunk_len`-byte
    chunks, annotated with the (left, right) stride overlap in bytes.'''
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
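
# Striding sketch for chunk_bytes_iter (illustrative numbers, not from the
# original file): with chunk_len=10 and stride=(2, 2) the window advances by
# 10 - 2 - 2 = 6 bytes per chunk, so consecutive chunks overlap and a consumer
# can discard the 2-byte borders flagged in "stride"; the very first chunk has
# a left stride of 0, and only the trailing partial chunk is shorter.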
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    '''Internal function to create the generator of data through ffmpeg.'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 207 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : str=14, lowerCamelCase : List[str]=7, lowerCamelCase : Dict=True, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : List[str]=99, lowerCamelCase : Optional[Any]=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : List[Any]=4, lowerCamelCase : List[Any]=37, lowerCamelCase : List[str]="gelu", lowerCamelCase : Any=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : List[Any]=512, lowerCamelCase : List[str]=16, lowerCamelCase : Dict=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Optional[int]=3, lowerCamelCase : Dict=4, lowerCamelCase : List[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_token_type_ids
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = use_mc_token_ids
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = self.vocab_size - 1
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
if self.use_mc_token_ids:
lowercase__ = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
lowercase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], *lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = CTRLModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
model(lowerCamelCase, token_type_ids=lowerCamelCase, head_mask=lowerCamelCase )
model(lowerCamelCase, token_type_ids=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], *lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def lowercase__ ( self : List[str], lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : int, lowerCamelCase : Any, *lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = CTRLForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase__ = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Dict, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = CTRLModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, n_embd=37 )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = CTRLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(lowerCamelCase )
lowercase__ = torch.tensor(
[[11_859, 0, 1_611, 8]], dtype=torch.long, device=lowerCamelCase ) # Legal the president is
lowercase__ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase__ = model.generate(lowerCamelCase, do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].tolist(), lowerCamelCase )
| 207 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects image tensor `x` ranging [0, 1], outputs bit tensor ranging [-1, 1]."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bit tensor `x` ranging [-1, 1], outputs image tensor ranging [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
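
# Round-trip sketch (not in the original file): the encoding quantizes to
# 1/255 steps, so decoding recovers the 8-bit quantization of the input:
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.equal(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)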
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the following.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F"Unsupported prediction_type {prediction_type}." )

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device )
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # The custom steps above read `self.bit_scale`, so expose it on the
        # scheduler they get bound to as well; `__get__` binds the replacement
        # step to the scheduler instance so that `self` resolves correctly.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 2_5_6,
        width: Optional[int] = 2_5_6,
        num_inference_steps: Optional[int] = 5_0,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator, )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
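
# Usage sketch (hypothetical settings, not from the original file). The UNet
# must accept 3 * BITS = 24 input/output channels, since images are expanded
# into bit planes before sampling:
#
#   unet = UNet2DConditionModel(sample_size=64, in_channels=24, out_channels=24)
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]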
| 42 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__UpperCAmelCase = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
__UpperCAmelCase = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__( self : Dict , _a : Tuple , _a : List[Any] , _a : Tuple=None , _a : Dict=None , _a : Any="<s>" , _a : Union[str, Any]="</s>" , _a : str="</s>" , _a : int="<pad>" , _a : str="<unk>" , _a : Tuple="m2m100" , _a : Optional[Dict[str, Any]] = None , _a : str=8 , **_a : str , ):
a__: str ={} if sp_model_kwargs is None else sp_model_kwargs
a__: Optional[int] =language_codes
a__: Dict =FAIRSEQ_LANGUAGE_CODES[language_codes]
a__: Tuple ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
a__: Any =kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_a )
for lang_code in fairseq_language_code
if self.get_lang_token(_a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_a , tgt_lang=_a , bos_token=_a , eos_token=_a , sep_token=_a , unk_token=_a , pad_token=_a , language_codes=_a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_a , **_a , )
a__: Optional[Any] =vocab_file
a__: Tuple =load_json(_a )
a__: Any ={v: k for k, v in self.encoder.items()}
a__: List[str] =spm_file
a__: str =load_spm(_a , self.sp_model_kwargs )
a__: Any =len(self.encoder )
a__: Dict ={
self.get_lang_token(_a ): self.encoder_size + i for i, lang_code in enumerate(_a )
}
a__: List[Any] ={lang_code: self.encoder_size + i for i, lang_code in enumerate(_a )}
a__: Dict ={v: k for k, v in self.lang_token_to_id.items()}
a__: List[str] =src_lang if src_lang is not None else "en"
a__: Any =tgt_lang
a__: Tuple =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
a__: str =num_madeup_words
@property
def _lowerCamelCase ( self : int ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowerCamelCase ( self : List[str] ):
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self : Tuple , _a : str ):
a__: Optional[int] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self : int , _a : str ):
return self.sp_model.encode(_a , out_type=_a )
def _lowerCamelCase ( self : Tuple , _a : int ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_a , self.encoder[self.unk_token] )
def _lowerCamelCase ( self : int , _a : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_a , self.unk_token )
def _lowerCamelCase ( self : Dict , _a : List[str] ):
a__: str =[]
a__: Union[str, Any] =""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
a__: Dict =[]
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _lowerCamelCase ( self : str , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
a__: Union[str, Any] =[1] * len(self.prefix_tokens )
a__: Optional[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self : Dict ):
a__: List[Any] ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
a__: Dict =self.__dict__.copy()
a__: Union[str, Any] =None
return state
def __setstate__( self : Tuple , _a : Dict ):
a__: str =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__: Optional[Any] ={}
a__: Optional[Any] =load_spm(self.spm_file , self.sp_model_kwargs )
def _lowerCamelCase ( self : Any , _a : str , _a : Optional[str] = None ):
a__: Union[str, Any] =Path(_a )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
a__: Union[str, Any] =save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a__: Optional[int] =save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , _a )
if os.path.abspath(self.spm_file ) != os.path.abspath(_a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _a )
elif not os.path.isfile(self.spm_file ):
with open(_a , "wb" ) as fi:
a__: str =self.sp_model.serialized_model_proto()
fi.write(_a )
return (str(_a ), str(_a ))
def _lowerCamelCase ( self : List[str] , _a : List[str] , _a : str = "en" , _a : Optional[List[str]] = None , _a : str = "ro" , **_a : Optional[Any] , ):
a__: Tuple =src_lang
a__: int =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _lowerCamelCase ( self : List[str] , _a : Dict , _a : Optional[str] , _a : Optional[str] , **_a : Optional[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a__: Dict =src_lang
a__: Optional[int] =self(_a , add_special_tokens=_a , **_a )
a__: Union[str, Any] =self.get_lang_id(_a )
a__: Tuple =tgt_lang_id
return inputs
def _lowerCamelCase ( self : List[Any] ):
self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self : List[Any] ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self : Union[str, Any] , _a : str ):
a__: Tuple =self.get_lang_token(_a )
a__: Optional[int] =self.lang_token_to_id[lang_token]
a__: Any =[self.cur_lang_id]
a__: Optional[Any] =[self.eos_token_id]
def _lowerCamelCase ( self : str , _a : str ):
a__: List[str] =self.get_lang_token(_a )
a__: Optional[Any] =self.lang_token_to_id[lang_token]
a__: Optional[int] =[self.cur_lang_id]
a__: Dict =[self.eos_token_id]
def _lowerCamelCase ( self : Any , _a : str ):
return self.lang_code_to_token[lang]
def _lowerCamelCase ( self : int , _a : str ):
a__: int =self.get_lang_token(_a )
return self.lang_token_to_id[lang_token]
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : Dict[str, Any] ):
a__: Tuple =sentencepiece.SentencePieceProcessor(**__magic_name__ )
spm.Load(str(__magic_name__ ) )
return spm
def __lowerCamelCase ( __magic_name__ : str ):
with open(__magic_name__ , "r" ) as f:
return json.load(__magic_name__ )
def __lowerCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : str ):
with open(__magic_name__ , "w" ) as f:
json.dump(__magic_name__ , __magic_name__ , indent=2 )
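
# Behavioural sketch (this file mirrors transformers' M2M100Tokenizer):
# encoding wraps the source text between the source-language token and </s>,
# i.e. for src_lang="en" the produced input_ids are
# [__en__, ...text tokens..., </s>], and `set_tgt_lang_special_tokens` swaps
# in the target-language token when preparing target text.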
| 42 | 1 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simplest image scaling: each destination pixel copies the nearest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to the nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 40 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body; a negative velocity
    is allowed since only its magnitude matters."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
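    # Worked check (illustrative numbers): a 10 kg mass moving at 10 m/s
    # carries 0.5 * 10 * 10**2 = 500 J.
    assert kinetic_energy(10, 10) == 500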
| 40 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = (PNDMScheduler,)
UpperCamelCase_ : List[Any] = (("""num_inference_steps""", 50),)
def _snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
'''simple docstring'''
A: Optional[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
'''simple docstring'''
A: List[str] = dict(self.forward_default_kwargs )
A: Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
A: int = self.dummy_sample
A: Optional[int] = 0.1 * sample
A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A: List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A: int = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A: Union[str, Any] = dummy_past_residuals[:]
A: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A: Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Tuple = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = dict(self.forward_default_kwargs )
A: Optional[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = self.dummy_sample
A: Tuple = 0.1 * sample
A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A: Union[str, Any] = self.get_scheduler_config()
A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
A: str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A: List[str] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
A: str = dummy_past_residuals[:]
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Any = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A: int = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A: List[Any] = self.scheduler_classes[0]
A: Any = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A: List[str] = 10
A: str = self.dummy_model()
A: str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.prk_timesteps ):
A: Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A: Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
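    # Note: `full_loop` above mirrors PNDM inference: the Runge-Kutta (PRK)
    # timesteps run first to build up the multistep history, after which the
    # cheaper linear multistep (PLMS) branch takes over.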
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = dict(self.forward_default_kwargs )
A: Optional[int] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
for scheduler_class in self.scheduler_classes:
A: Optional[int] = self.get_scheduler_config()
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A: List[str] = self.dummy_sample
A: Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
A: Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A: Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A: Dict = dummy_past_residuals[:]
A: List[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A: Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self : Any ) -> Dict:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
A: Optional[int] = self.scheduler_classes[0]
A: List[str] = self.get_scheduler_config(steps_offset=1 )
A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> str:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
A: int = self.dummy_sample
A: List[Any] = 0.1 * sample
A: str = self.get_scheduler_config()
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
def _snake_case ( self : int ) -> str:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
A: List[str] = self.scheduler_classes[0]
A: Tuple = self.get_scheduler_config()
A: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _snake_case ( self : int ) -> Tuple:
'''simple docstring'''
A: Optional[int] = self.full_loop()
A: int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def _snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
A: List[str] = self.full_loop(prediction_type='''v_prediction''' )
A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
A: str = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
A: str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def _snake_case ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A: int = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
A: List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 334 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger('''transformers.models.encodec''')
UpperCamelCase = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
UpperCamelCase = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
UpperCamelCase = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
UpperCamelCase = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
UpperCamelCase = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
UpperCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
UpperCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
UpperCamelCase = []
UpperCamelCase = []
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
for attribute in key.split('''.''' ):
A: Union[str, Any] = getattr(__lowercase , __lowercase )
if weight_type is not None:
A: Tuple = getattr(__lowercase , __lowercase ).shape
else:
A: str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A: Dict = value
elif weight_type == "weight_g":
A: Tuple = value
elif weight_type == "weight_v":
A: Any = value
elif weight_type == "bias":
A: str = value
elif weight_type == "running_mean":
A: List[Any] = value
elif weight_type == "running_var":
A: Dict = value
elif weight_type == "num_batches_tracked":
A: List[str] = value
elif weight_type == "weight_ih_l0":
A: Dict = value
elif weight_type == "weight_hh_l0":
A: Optional[int] = value
elif weight_type == "bias_ih_l0":
A: List[Any] = value
elif weight_type == "bias_hh_l0":
A: str = value
elif weight_type == "weight_ih_l1":
A: Optional[int] = value
elif weight_type == "weight_hh_l1":
A: int = value
elif weight_type == "bias_ih_l1":
A: Optional[Any] = value
elif weight_type == "bias_hh_l1":
A: str = value
else:
A: Optional[int] = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
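
# Matching sketch (illustrative key, not from a real checkpoint): a pattern
# such as "quantizer.vq.layers.*._codebook.embed" splits into prefix
# "quantizer.vq.layers" and suffix "_codebook.embed", so it matches e.g.
# "quantizer.vq.layers.3._codebook.embed" for any layer index.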
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
A: Any = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A: List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A: List[Any] = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__lowercase , __lowercase ):
logger.info(F"""{name} was ignored""" )
continue
A: Optional[int] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix , suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
A: Optional[Any] = True
if "*" in mapped_key:
A: Any = name.split(__lowercase )[0].split('''.''' )[-2]
A: Tuple = mapped_key.replace('''*''' , __lowercase )
if "weight_g" in name:
A: str = '''weight_g'''
elif "weight_v" in name:
A: List[Any] = '''weight_v'''
elif "weight_ih_l0" in name:
A: Dict = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
A: int = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
A: Union[str, Any] = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
A: Tuple = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
A: int = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
A: Optional[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
A: Dict = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
A: str = '''bias_hh_l1'''
elif "bias" in name:
A: Union[str, Any] = '''bias'''
elif "weight" in name:
A: Dict = '''weight'''
elif "running_mean" in name:
A: Tuple = '''running_mean'''
elif "running_var" in name:
A: Any = '''running_var'''
elif "num_batches_tracked" in name:
A: str = '''num_batches_tracked'''
else:
A: Tuple = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict:
if config_path is not None:
A: Tuple = EncodecConfig.from_pretrained(__lowercase )
else:
A: Union[str, Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A: Union[str, Any] = [8, 5, 4, 4]
A: Dict = [2.2]
A: List[Any] = 6_4
A: Optional[Any] = 3_2_0_0_0
A: List[Any] = 2_0_4_8
A: Optional[Any] = False
A: int = False
A: Union[str, Any] = False
elif model_name == "encodec_48khz":
A: Optional[int] = [8, 5, 4, 2]
A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0]
A: List[Any] = 4_8_0_0_0
A: int = 2
A: List[Any] = False
A: Any = '''time_group_norm'''
A: Optional[Any] = True
A: Any = 1.0
A: Any = 0.0_1
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
A: str = EncodecModel(__lowercase )
A: Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__lowercase )
A: Union[str, Any] = torch.load(__lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A: Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(__lowercase , __lowercase , __lowercase )
model.save_pretrained(__lowercase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__lowercase )
model.push_to_hub(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
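# Example invocation sketch -- the script name and all paths below are
# hypothetical placeholders, not files shipped with this repo:
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec-24khz-converted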
| 334 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__A = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( datasets.BuilderConfig ):
a__ : int = 1_00_00
a__ : Optional[List[str]] = None
a__ : Optional[datasets.Features] = None
class lowerCamelCase__ ( datasets.ArrowBasedBuilder ):
a__ : Optional[Any] = ParquetConfig
def lowerCamelCase_ ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE__ , (str, list, tuple) ):
snake_case : Optional[Any] = data_files
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case : Optional[Any] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case : Union[str, Any] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
snake_case : Dict = datasets.Features.from_arrow_schema(pq.read_schema(SCREAMING_SNAKE_CASE__ ) )
break
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case : Tuple = table_cast(SCREAMING_SNAKE_CASE__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : str = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__ ) ):
with open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
snake_case : Optional[int] = pq.ParquetFile(SCREAMING_SNAKE_CASE__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
snake_case : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(SCREAMING_SNAKE_CASE__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE__ )}: {e}''' )
raise
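# Minimal usage sketch: this builder is what `datasets.load_dataset` dispatches
# to for the "parquet" packaged module. The file path below is a hypothetical
# placeholder:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})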
| 148 | '''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_A : List[Any] = 299792458
# Symbols
_A , _A , _A , _A : Union[str, Any] = symbols('''ct x y z''')
def UpperCamelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
        # Usually the speed is much greater than 1 (on the order of magnitude of c)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def UpperCamelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
return 1 / sqrt(1 - beta(snake_case_ ) ** 2 )
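# Worked example (illustrative): for velocity = 0.5 * c, beta = 0.5 and
# gamma = 1 / sqrt(1 - 0.25) ~= 1.1547, i.e. time dilation of about 15%.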
def UpperCamelCase_ ( snake_case_ : float ) -> np.ndarray:
'''simple docstring'''
return np.array(
[
[gamma(snake_case_ ), -gamma(snake_case_ ) * beta(snake_case_ ), 0, 0],
[-gamma(snake_case_ ) * beta(snake_case_ ), gamma(snake_case_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def UpperCamelCase_ ( snake_case_ : float , snake_case_ : np.ndarray | None = None ) -> np.ndarray:
'''simple docstring'''
if event is None:
__lowerCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(snake_case_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_A : str = transform(29979245)
print('''Example of four vector: ''')
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
_A : int = {ct: c, x: 1, y: 1, z: 1}
_A : Any = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
| 229 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
A__ : str =field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
A__ : ClassVar[Features] =Features({"""audio""": Audio()} )
A__ : ClassVar[Features] =Features({"""labels""": ClassLabel} )
A__ : str ="audio"
A__ : str ="labels"
def A_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any] ):
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , UpperCAmelCase_ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self )
SCREAMING_SNAKE_CASE__ = self.label_schema.copy()
SCREAMING_SNAKE_CASE__ = features[self.label_column]
SCREAMING_SNAKE_CASE__ = label_schema
return task_template
@property
def A_ ( self : Union[str, Any] ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 169 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Tuple =FlaxAutoencoderKL
@property
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = (32, 32)
SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ = jax.random.uniform(UpperCAmelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
| 169 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = "trajectory_transformer"
lowerCAmelCase_ = ["past_key_values"]
lowerCAmelCase_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , _lowercase : Optional[Any]=1_00 , _lowercase : Union[str, Any]=5 , _lowercase : Tuple=1 , _lowercase : Any=1 , _lowercase : Union[str, Any]=2_49 , _lowercase : List[str]=6 , _lowercase : Optional[Any]=17 , _lowercase : Optional[int]=25 , _lowercase : Union[str, Any]=4 , _lowercase : str=4 , _lowercase : Optional[Any]=1_28 , _lowercase : Optional[Any]=0.1 , _lowercase : Optional[Any]=0.1 , _lowercase : Tuple=0.1 , _lowercase : List[str]=0.00_06 , _lowercase : Union[str, Any]=5_12 , _lowercase : Optional[int]=0.02 , _lowercase : Tuple=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[int]=True , _lowercase : List[Any]=1 , _lowercase : Any=5_02_56 , _lowercase : Tuple=5_02_56 , **_lowercase : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
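# Usage sketch (illustrative; assumes the matching deprecated model class is
# importable from the same package):
#   config = TrajectoryTransformerConfig()
#   model = TrajectoryTransformerModel(config)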
| 219 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowerCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowerCamelCase : int = [0, 25, 50]
__lowerCamelCase : Tuple = [25, 50, 75]
__lowerCamelCase : List[str] = fuzz.membership.trimf(X, abca)
__lowerCamelCase : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowerCamelCase : List[str] = np.ones(75)
__lowerCamelCase : Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowerCamelCase : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowerCamelCase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
__lowerCamelCase : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowerCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__lowerCamelCase : List[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowerCamelCase : int = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__lowerCamelCase : int = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
__lowerCamelCase : str = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
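# Worked example (illustrative, for a single point with µA(x) = 0.6, µB(x) = 0.3):
#   union = 0.6, intersection = 0.3, complement(A) = 0.4,
#   algebraic sum = 0.6 + 0.3 - 0.18 = 0.72, algebraic product = 0.18,
#   bounded sum = min(1, 0.9) = 0.9, bounded difference = max(0, 0.3) = 0.3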
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 219 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : List[Any] = 'mobilenet_v2'
def __init__( self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu6" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.8 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.001 , SCREAMING_SNAKE_CASE_=255 , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
UpperCamelCase : Dict = num_channels
UpperCamelCase : str = image_size
UpperCamelCase : List[str] = depth_multiplier
UpperCamelCase : Dict = depth_divisible_by
UpperCamelCase : int = min_depth
UpperCamelCase : Optional[int] = expand_ratio
UpperCamelCase : Any = output_stride
UpperCamelCase : Tuple = first_layer_is_expansion
UpperCamelCase : Optional[Any] = finegrained_output
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Dict = tf_padding
UpperCamelCase : List[Any] = classifier_dropout_prob
UpperCamelCase : str = initializer_range
UpperCamelCase : Optional[int] = layer_norm_eps
UpperCamelCase : Tuple = semantic_loss_ignore_index
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Optional[Any] = version.parse('1.11' )
@property
def a_ ( self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def a_ ( self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def a_ ( self ):
return 1e-4
| 359 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( _UpperCAmelCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1 , ):
UpperCamelCase : Tuple = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Optional[Any] = seq_length
UpperCamelCase : int = is_training
UpperCamelCase : Union[str, Any] = use_input_mask
UpperCamelCase : Union[str, Any] = use_token_type_ids
UpperCamelCase : Dict = use_labels
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Any = num_attention_heads
UpperCamelCase : int = intermediate_size
UpperCamelCase : str = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : Optional[Any] = type_vocab_size
UpperCamelCase : int = type_sequence_label_size
UpperCamelCase : Dict = initializer_range
UpperCamelCase : Dict = num_labels
UpperCamelCase : Tuple = num_choices
UpperCamelCase : Optional[int] = scope
UpperCamelCase : List[Any] = q_groups
UpperCamelCase : Tuple = k_groups
UpperCamelCase : Any = v_groups
UpperCamelCase : List[str] = post_attention_groups
UpperCamelCase : Tuple = intermediate_groups
UpperCamelCase : int = output_groups
def a_ ( self ):
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Tuple = None
if self.use_input_mask:
UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Optional[int] = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Dict = None
if self.use_labels:
UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = SqueezeBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : str = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = self.num_labels
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = self.num_labels
UpperCamelCase : str = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = self.num_choices
UpperCamelCase : Tuple = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ):
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = config_and_inputs
UpperCamelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase : Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase : Dict = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Dict = False
lowercase : str = True
lowercase : str = False
def a_ ( self ):
UpperCamelCase : Any = SqueezeBertModelTester(self )
UpperCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def a_ ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def a_ ( self ):
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
UpperCamelCase : Dict = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Optional[Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 27 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowercase : int = logging.get_logger(__name__)
lowercase : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase : List[Any] = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
lowercase : Tuple = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
lowercase : Optional[int] = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = PRETRAINED_INIT_CONFIGURATION
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase = BertTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_snake_case = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowerCAmelCase_ )
_snake_case = do_lower_case
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
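    # Example (standard BERT input format): a single sequence A is returned as
    # [CLS] A [SEP]; a pair (A, B) is returned as [CLS] A [SEP] B [SEP].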
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
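    # Example (standard BERT segment ids): for a pair (A, B) this yields
    # [0] * (len(A) + 2) + [1] * (len(B) + 1), i.e. zeros over "[CLS] A [SEP]"
    # and ones over "B [SEP]".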
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 42 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
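        # Illustrative cutoff layout (the standard WikiText-103 values, given
        # only as an example): vocab_size=267735 with cutoffs=[20000, 40000, 200000]
        # yields cutoff_ends=[0, 20000, 40000, 200000, 267735], i.e. a 20k-token
        # shortlist plus three progressively rarer tail clusters.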
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 42 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : str = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[Any] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Union[str, Any] = "openai-gpt"
__magic_name__ : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , lowerCAmelCase : Optional[Any]=40478 , lowerCAmelCase : str=512 , lowerCAmelCase : List[Any]=768 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : int=12 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Tuple=1E-5 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Optional[int]="cls_index" , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=True , lowerCAmelCase : int=0.1 , **lowerCAmelCase : Optional[int] , )-> str:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = afn
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = summary_proj_to_labels
super().__init__(**lowerCAmelCase )
| 91 | 1 |
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
SCREAMING_SNAKE_CASE =''
SCREAMING_SNAKE_CASE =''
    # append each character + "|" to new_input_string for range(0, length-1)
for i in input_string[: len(lowerCAmelCase_ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =0, 0
# length[i] shows the length of palindromic substring with center i
SCREAMING_SNAKE_CASE =[1 for i in range(len(lowerCAmelCase_ ) )]
    # for each character in new_input_string, find the corresponding palindromic string
SCREAMING_SNAKE_CASE =0
for j in range(len(lowerCAmelCase_ ) ):
SCREAMING_SNAKE_CASE =1 if j > r else min(length[l + r - j] // 2, r - j + 1 )
while (
j - k >= 0
and j + k < len(lowerCAmelCase_ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
SCREAMING_SNAKE_CASE =2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
SCREAMING_SNAKE_CASE =j - k + 1 # noqa: E741
SCREAMING_SNAKE_CASE =j + k - 1
# update max_length and start position
if max_length < length[j]:
SCREAMING_SNAKE_CASE =length[j]
SCREAMING_SNAKE_CASE =j
# create that string
SCREAMING_SNAKE_CASE =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
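# Illustrative results with the function above:
#   snake_case__("abbbaba") -> "abbba"   (longest palindromic substring)
#   snake_case__("ababa")   -> "ababa"   (the whole string is a palindrome)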
| 334 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
| 334 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str ):
UpperCamelCase_ : Tuple = XCLIPTextConfig()
# derive patch size from model name
UpperCamelCase_ : Dict = model_name.find('patch' )
UpperCamelCase_ : Dict = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCamelCase_ : Optional[Any] = XCLIPVisionConfig(patch_size=lowerCamelCase , num_frames=lowerCamelCase )
if "large" in model_name:
UpperCamelCase_ : Any = 768
UpperCamelCase_ : Any = 3072
UpperCamelCase_ : List[Any] = 12
UpperCamelCase_ : Any = 1024
UpperCamelCase_ : List[Any] = 4096
UpperCamelCase_ : Union[str, Any] = 16
UpperCamelCase_ : int = 24
UpperCamelCase_ : Any = 768
UpperCamelCase_ : List[str] = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCamelCase_ : str = 336
UpperCamelCase_ : int = XCLIPConfig.from_text_vision_configs(lowerCamelCase , lowerCamelCase )
if "large" in model_name:
UpperCamelCase_ : Union[str, Any] = 768
return config
def __lowercase ( lowerCamelCase : Optional[Any] ):
# text encoder
if name == "token_embedding.weight":
UpperCamelCase_ : Dict = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCamelCase_ : Dict = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCamelCase_ : List[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCamelCase_ : int = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCamelCase_ : Optional[Any] = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCamelCase_ : Any = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCamelCase_ : Any = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCamelCase_ : List[Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCamelCase_ : Optional[Any] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCamelCase_ : Dict = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCamelCase_ : Tuple = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCamelCase_ : Tuple = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCamelCase_ : Optional[Any] = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCamelCase_ : Any = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCamelCase_ : Optional[Any] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCamelCase_ : List[str] = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCamelCase_ : Optional[Any] = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCamelCase_ : Dict = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCamelCase_ : Tuple = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCamelCase_ : Optional[Any] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
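# Worked example (added for illustration): the replacements above compose, so an
# original X-CLIP key such as
#     'visual.transformer.resblocks.0.ln_1.weight'
# is first rewritten by the 'ln_1' rule and then by the
# 'visual.transformer.resblocks' rule, ending up as
#     'vision_model.encoder.layers.0.layer_norm1.weight'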
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : Dict ):
for key in orig_state_dict.copy().keys():
UpperCamelCase_ : int = orig_state_dict.pop(lowerCamelCase )
if "attn.in_proj" in key:
UpperCamelCase_ : Optional[Any] = key.split('.' )
if key.startswith('visual' ):
UpperCamelCase_ : Optional[Any] = key_split[3]
UpperCamelCase_ : str = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase_ : Dict = val[
:dim, :
]
UpperCamelCase_ : List[str] = val[
dim : dim * 2, :
]
UpperCamelCase_ : List[Any] = val[
-dim:, :
]
else:
UpperCamelCase_ : Union[str, Any] = val[
:dim
]
UpperCamelCase_ : Union[str, Any] = val[
dim : dim * 2
]
UpperCamelCase_ : str = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase_ : List[Any] = val[
:dim, :
]
UpperCamelCase_ : Dict = val[
dim : dim * 2, :
]
UpperCamelCase_ : List[Any] = val[
-dim:, :
]
else:
UpperCamelCase_ : Any = val[:dim]
UpperCamelCase_ : Dict = val[
dim : dim * 2
]
UpperCamelCase_ : List[str] = val[-dim:]
elif key.startswith('mit' ):
UpperCamelCase_ : int = key_split[2]
UpperCamelCase_ : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase_ : Optional[Any] = val[:dim, :]
UpperCamelCase_ : Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase_ : int = val[-dim:, :]
else:
UpperCamelCase_ : str = val[:dim]
UpperCamelCase_ : Optional[int] = val[dim : dim * 2]
UpperCamelCase_ : Optional[int] = val[-dim:]
else:
UpperCamelCase_ : int = key_split[2]
UpperCamelCase_ : List[Any] = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase_ : Tuple = val[:dim, :]
UpperCamelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCamelCase_ : Optional[int] = val[-dim:, :]
else:
UpperCamelCase_ : List[Any] = val[:dim]
UpperCamelCase_ : List[Any] = val[
dim : dim * 2
]
UpperCamelCase_ : int = val[-dim:]
else:
UpperCamelCase_ : Optional[int] = rename_key(lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCamelCase_ : Optional[Any] = val.T
UpperCamelCase_ : List[Any] = val
return orig_state_dict
def __lowercase ( lowerCamelCase : Dict ):
if num_frames == 8:
UpperCamelCase_ : List[Any] = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCamelCase_ : List[Any] = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCamelCase_ : Optional[Any] = 'eating_spaghetti_32_frames.npy'
UpperCamelCase_ : str = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=lowerCamelCase , repo_type='dataset' , )
UpperCamelCase_ : Optional[Any] = np.load(lowerCamelCase )
return list(lowerCamelCase )
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Tuple=None , lowerCamelCase : Union[str, Any]=False ):
UpperCamelCase_ : Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCamelCase_ : Any = model_to_url[model_name]
UpperCamelCase_ : Tuple = 8
if "16-frames" in model_name:
UpperCamelCase_ : str = 16
elif "shot" in model_name:
UpperCamelCase_ : int = 32
UpperCamelCase_ : Tuple = get_xclip_config(lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : List[str] = XCLIPModel(lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
UpperCamelCase_ : Optional[Any] = 'pytorch_model.bin'
gdown.cached_download(lowerCamelCase , lowerCamelCase , quiet=lowerCamelCase )
UpperCamelCase_ : Optional[int] = torch.load(lowerCamelCase , map_location='cpu' )['model']
else:
UpperCamelCase_ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase )['model']
UpperCamelCase_ : int = convert_state_dict(lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : int = XCLIPModel(lowerCamelCase )
UpperCamelCase_ : Optional[int] = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCamelCase_ : Optional[Any] = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
UpperCamelCase_ : List[str] = VideoMAEImageProcessor(size=lowerCamelCase )
UpperCamelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase_ : List[str] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase_ : Optional[int] = XCLIPProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
UpperCamelCase_ : str = prepare_video(lowerCamelCase )
UpperCamelCase_ : Any = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCamelCase_ : Optional[int] = model(**lowerCamelCase )
# Verify outputs
UpperCamelCase_ : int = outputs.logits_per_video
UpperCamelCase_ : Optional[int] = logits_per_video.softmax(dim=1 )
print('Probs:' , lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase_ : Union[str, Any] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase_ : List[Any] = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase_ : List[Any] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase_ : int = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase_ : List[str] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase_ : Optional[int] = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase_ : int = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase_ : Any = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase_ : Dict = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase_ : Tuple = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase_ : List[Any] = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase_ : Optional[Any] = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase_ : int = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase_ : List[str] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase_ : List[str] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase_ : List[Any] = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase_ : List[Any] = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase_ : str = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(lowerCamelCase , organization='nielsr' )
processor.push_to_hub(lowerCamelCase , organization='nielsr' )
slow_tokenizer.push_to_hub(lowerCamelCase , organization='nielsr' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
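# Hedged usage sketch (added; the script filename is an assumption):
#
#     python convert_x_clip_original_pytorch_to_hf.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path ./xclip-base-patch32 \
#         --push_to_hub
#
# --model_name must be one of the keys of the checkpoint map defined above.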
| 358 |
INSTALL_CONTENT = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 50 | 0 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits, as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits, as bit strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
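# Illustrative check (added; uses only the functions above): consecutive 2-bit
# Gray codes differ in exactly one bit.
if __name__ == "__main__":
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
    assert gray_code(2) == [0, 1, 3, 2]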
| 169 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
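# Minimal usage sketch (added for illustration; uses the class above). The
# defaults are meant to mirror the pretrained google/realm-* checkpoints:
#
#     config = RealmConfig()
#     config = RealmConfig(num_candidates=4, reader_beam_size=3)
#     print(config.model_type)  # 'realm'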
| 169 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
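# Worked example (added; uses the definitions above). For the tree
#       0
#      / \
#     0   3
# three moves are needed: two coins leave the right leaf, and one of them
# continues through the root to the left leaf.
if __name__ == "__main__":
    example = TreeNode(0, TreeNode(0), TreeNode(3))
    assert distribute_coins(example) == 3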
| 355 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list:
    result: list = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list,
    total_list: list,
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
_snake_case = 4
_snake_case = 2
_snake_case = generate_all_combinations(n, k)
print_all_state(total_list)
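# Illustrative check (added; uses the functions above):
#
#     >>> generate_all_combinations(n=4, k=2)
#     [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]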
| 201 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
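# Quick check (added; uses the helper above):
#
#     >>> [is_punctuation(c) for c in ("a", ",", "[")]
#     [False, True, True]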
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here because the labels are not
            # all of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 212 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path='sayef/fsner-bert-base-uncased'):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        # Score each query token as a potential entity start/end against the supports.
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports['sizes']
        del W_supports['start_token_id']
        del W_supports['end_token_id']

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
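# Hedged usage sketch (added; the tokenizer side is an assumption — the upstream
# fsner example project pairs this model with a tokenizer helper):
#
#     model = FSNERModel()
#     # W_query / W_supports are dicts of input_ids / attention_mask tensors;
#     # W_supports additionally carries the 'sizes', 'start_token_id' and
#     # 'end_token_id' entries consumed by `forward` above.
#     p_starts, p_ends = model(W_query, W_supports)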
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Directed edge; the weight must be 0 or 1."""

    destination_vertex: int
    weight: int


class lowerCAmelCase__ :
    pass


class AdjacencyList:
    """Adjacency list over vertices 0..size-1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque and 1-weight
                # edges to the back, so the deque stays sorted by distance.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''')

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
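# Illustrative check (added; uses the classes above): 0-weight edges are free,
# so the shortest path 0 -> 1 -> 2 costs 0 even though the direct edge costs 1.
if __name__ == "__main__":
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 0)
    graph.add_edge(0, 2, 1)
    assert graph.get_shortest_path(0, 2) == 0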
| 40 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap's node: a binary search tree by value and a heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'''\'{self.value}: {self.prior:.5}\''''
        else:
            return pformat(
                {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes <= value, nodes > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert a value by splitting at it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=''',''')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands like '+5' (insert 5) or '-5' (erase 5) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('''Unknown command''')
    return root


def main() -> None:
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''')

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print('''good by!''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
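# Non-interactive sketch (added; uses the functions above):
#
#     root = None
#     for value in (5, 3, 9):
#         root = insert(root, value)
#     inorder(root)   # prints 3,5,9,
#     root = erase(root, 5)
#     inorder(root)   # prints 3,9,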
| 40 | 1 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
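# The supported import path (added note; taken from the deprecation message above):
#
#     from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput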
| 91 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = """▁"""
UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase_ : str = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCAmelCase_ : str = {
"""facebook/xglm-564M""": 2048,
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)]
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , [])
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
self.fairseq_tokens_to_ids.update(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return [1] + ([0] * len(lowercase_))
return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str):
'''simple docstring'''
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(lowercase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , '''wb''') as fi:
SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
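# Hedged usage sketch (added; this module ships in `transformers` as the XGLM
# tokenizer, so the standard tokenizer API applies):
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tokenizer("Hello world")["input_ids"]
#     text = tokenizer.decode(ids)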
| 91 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :List[str] = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    _a : Tuple = model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
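# Minimal usage sketch (added for illustration; uses the class above):
#
#     config = Speech2Text2Config()
#     config = Speech2Text2Config(decoder_layers=3, d_model=128)
#     print(config.hidden_size)  # 128, resolved through attribute_map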
| 357 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase__ :Optional[Any] = datasets.logging.get_logger(__name__)
lowerCAmelCase__ :str = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
lowerCAmelCase__ :List[Any] = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
lowerCAmelCase__ :Optional[int] = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def lowerCAmelCase__ ( a__: int , a__: int , a__: Dict=False , a__: str=False , a__: Optional[int]=True , a__: Any=False , a__: str="dummy_doc" ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {doc: key_lines}
_UpperCAmelCase = {doc: sys_lines}
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase , _UpperCAmelCase = reader.get_doc_mentions(a__ , key_doc_lines[doc] , a__ )
key_singletons_num += singletons_num
if NP_only or min_span:
_UpperCAmelCase = reader.set_annotated_parse_trees(a__ , key_doc_lines[doc] , a__ , a__ )
_UpperCAmelCase , _UpperCAmelCase = reader.get_doc_mentions(a__ , sys_doc_lines[doc] , a__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
_UpperCAmelCase = reader.set_annotated_parse_trees(a__ , key_doc_lines[doc] , a__ , a__ )
if remove_nested:
_UpperCAmelCase , _UpperCAmelCase = reader.remove_nested_coref_mentions(a__ , a__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_UpperCAmelCase , _UpperCAmelCase = reader.remove_nested_coref_mentions(a__ , a__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_UpperCAmelCase = reader.get_mention_assignments(a__ , a__ )
_UpperCAmelCase = reader.get_mention_assignments(a__ , a__ )
_UpperCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
'Number of resulting singleton clusters in the key '
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
'files, respectively' )
return doc_coref_infos
def lowerCAmelCase__ ( a__: Any , a__: List[str] , a__: List[str] , a__: Optional[int] , a__: Optional[Any] , a__: Any , a__: Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = get_coref_infos(a__ , a__ , a__ , a__ , a__ , a__ )
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for name, metric in metrics:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = evaluator.evaluate_documents(a__ , a__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(1_0 ) , F'''Recall: {recall * 1_0_0:.2f}''' , F''' Precision: {precision * 1_0_0:.2f}''' , F''' F1: {fa * 1_0_0:.2f}''' , )
if conll_subparts_num == 3:
_UpperCAmelCase = (conll / 3) * 1_0_0
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({'conll_score': conll} )
return output_scores
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
_UpperCAmelCase = line.split()[5]
if not parse_col == "-":
_UpperCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
_UpperCAmelCase = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
_UpperCAmelCase = util.check_gold_parse_annotation(_SCREAMING_SNAKE_CASE )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_UpperCAmelCase = evaluate(
key_lines=_SCREAMING_SNAKE_CASE , sys_lines=_SCREAMING_SNAKE_CASE , metrics=_SCREAMING_SNAKE_CASE , NP_only=_SCREAMING_SNAKE_CASE , remove_nested=_SCREAMING_SNAKE_CASE , keep_singletons=_SCREAMING_SNAKE_CASE , min_span=_SCREAMING_SNAKE_CASE , )
return score
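# Hedged usage sketch (added; it follows the docstring example above):
#
#     import datasets
#
#     coval = datasets.load_metric("coval")
#     results = coval.compute(predictions=[words], references=[words])
#     print(results["conll_score"])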
| 185 | 0 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Compute the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
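# Worked example (added; uses the functions above): F(12) = 144 is the first
# Fibonacci number with three digits, so:
#
#     >>> solution(3)
#     12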
| 203 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ) -> Tuple:
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
lowerCamelCase__ : Optional[Any] = True
elif "IPython" in sys.modules:
lowerCamelCase__ : Optional[Any] = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
lowerCamelCase__ : List[str] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
lowerCamelCase__ : Optional[Any] = 8
lowerCamelCase__ : List[str] = PrepareForLaunch(_UpperCAmelCase , distributed_type='TPU' )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=_UpperCAmelCase , master_addr='127.0.0.1' , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = PrepareForLaunch(_UpperCAmelCase , distributed_type='MULTI_GPU' )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase__ : int = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ) -> Optional[Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
lowerCamelCase__ : Optional[Any] = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
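
# A minimal usage sketch (assumption: the two functions above are accelerate's
# `notebook_launcher` and `debug_launcher`). The `Accelerator` must be created
# *inside* the launched function, because workers are forked.
def _toy_training_function(learning_rate):
    from accelerate import Accelerator

    accelerator = Accelerator()  # safe here: constructed inside the worker
    accelerator.print(f'process {accelerator.process_index}: lr={learning_rate}')


# With num_processes=1 this runs the function in the current process on CPU/GPU/MPS:
# notebook_launcher(_toy_training_function, args=(1e-4,), num_processes=1)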
| 50 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    'The `image_to_image.py` script is outdated. Please use `from diffusers import'
    ' StableDiffusionImg2ImgPipeline` directly instead.'
)
| 53 |
"""simple docstring"""
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''

    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
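
# A cross-check sketch: Sylvester's sequence also satisfies a(1) = 2 and
# a(n) = a(n-1)**2 - a(n-1) + 1 (since lower * upper + 1 == num**2 - num + 1),
# so an iterative version avoids deep recursion; sylvester_iterative(8) == sylvester(8).
def sylvester_iterative(number: int) -> int:
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1
    return term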
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 53 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4))
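
# A minimal post-processing sketch (assumption: `predicted_depth` is the
# (batch, height, width) tensor produced by `DPTForDepthEstimation`, as in the
# integration test above). Upsampling back to the input image size is the
# usual final step.
def resize_depth_to_image(predicted_depth, image_size):
    depth = predicted_depth.unsqueeze(1)  # (batch, 1, H, W) for interpolate
    depth = torch.nn.functional.interpolate(depth, size=image_size, mode='bicubic', align_corners=False)
    return depth.squeeze(1)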
| 89 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length''' ), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='''max_length''', ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, labelaid = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(labelaid), label2id=labelaid, id2label={id: label for label, id in labelaid.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)

        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')

        with open(output_eval_file, '''w''') as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
            results.update(result)
return results
if __name__ == "__main__":
main()
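
# A hypothetical invocation of this script (file names and the label column
# index below are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir ./out --do_train --do_eval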
| 201 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 58 |
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
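
# A simple brute-force alternative for comparison: scan every contiguous
# 13-digit window of N and take the maximum digit product.
def solution_bruteforce(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))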
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
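
# Usage sketch: with the lazy module in place, a statement such as
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# only triggers the heavy framework import on first attribute access
# (assumption: this file is that subpackage's `__init__.py`).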
| 40 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 40 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
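
# Typical invocations of this subcommand (the config path is a placeholder):
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml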
| 80 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(F'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(F'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 80 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f'''transformer_{layer_num}''', f'''encoder.layer.{layer_num}''')
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
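
# A hypothetical invocation (the script name and paths are placeholders):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json \
#       --pytorch_dump_path ./yoso-hf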
| 25 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
"""simple docstring"""
    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'
    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs, ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs, )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['original_sizes']

        if hasattr(original_sizes, 'numpy'):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors, )

        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors='pt', ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None, ):
        if input_points is not None:
            if hasattr(input_points, 'numpy'):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError('Input points must be a list of list of floating points.')
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, 'numpy'):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError('Input labels must be a list of list integers.')
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, 'numpy'):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.')
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
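
# A minimal usage sketch (assumption: the class above is transformers'
# `SamProcessor`, typically paired with `SamModel`):
#
#   processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors='pt')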
| 185 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    args = Namespace(**checkpoint["""cfg"""]["""model"""])
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]

    state_dict = {key.replace("""decoder""", """model"""): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
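
# A hypothetical invocation (positional arguments per the parser above; the
# script name and paths are placeholders):
#
#   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-hf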
| 168 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
def _lowerCAmelCase ( self , _a , _a , _a=None , _a = "test" ):
"""simple docstring"""
lowerCamelCase = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase = self.compute_metrics
lowerCamelCase = None
lowerCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCamelCase = time.time()
try:
lowerCamelCase = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
lowerCamelCase = compute_metrics
lowerCamelCase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase = self.post_process_function(_a , _a , output.predictions , """predict""" )
lowerCamelCase = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
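# Minimal usage sketch (`post_processing_function` and `compute_metrics` stand in
# for the usual squad-style helpers and are assumptions, not part of this file):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()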
| 168 | 1 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # File names look like `<breed>_<number>.jpg`; the breed is the label.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
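# e.g. extract_label("images/great_pyrenees_123.jpg") -> "great_pyrenees"  (illustrative path)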
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16)."
        " Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
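# Example invocation (script name and paths are hypothetical):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking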
| 53 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
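
# Sketch of a constant-space alternative: Floyd's tortoise-and-hare cycle detection.
# The visited-list check in __iter__ above costs O(n^2) time and O(n) space; this
# runs in O(n) time and O(1) space on the same Node type.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False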
| 53 | 1 |
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
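# Derivation sketch (assuming Project Euler #135 semantics): with x = a + d, y = a,
# z = a - d, we get x**2 - y**2 - z**2 = a * (4d - a) = n. Scanning a = first_term
# and n over multiples of a, the quantity first_term + n / first_term equals 4d, so
# it must be divisible by 4; z > 0 forces a > d and n > 0 forces 4d > a, matching
# the checks above. The answer counts n below the limit with exactly ten solutions.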
| 363 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
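# To run these tests locally (the file path is illustrative):
#   python -m pytest tests/test_configuration_common.py -k "ConfigPushToHubTester or ConfigTestUtils" -x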
| 288 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.372, -12.787, -13.477], [-12.536, -14.194, -14.409], [-13.217, -14.888, -15.327]],
                [[-14.791, -17.122, -18.277], [-17.163, -19.192, -19.533], [-17.897, -19.991, -20.315]],
                [[0.76723, 0.41921, -0.077878], [0.47772, 0.0095557, -0.28082], [0.36032, -0.24826, -0.51168]],
            ] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
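# Example invocation (the checkpoint path is hypothetical):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade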
| 58 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects the marked methods into a key -> handler table
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        "Finds and returns the selected character if it exists in the handler"
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
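
# Minimal usage sketch (the menu class and chosen key are illustrative):
#
#   class Menu(metaclass=KeyHandler):
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#   Menu.handle_input()  # reads one key and dispatches to the function marked with it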
| 58 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
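# Example (coordinates are illustrative): distance in metres between San Francisco
# and Yosemite; over this range the result should sit within a few hundred metres
# of the haversine estimate:
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)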
| 364 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,  # the expected-encoding dict defined above
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 140 | 0 |
'''simple docstring'''
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
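# This table feeds transformers' setup and runtime version checks; a typical
# lookup (illustrative) pins a package at import time:
#   from .utils.versions import require_version
#   require_version(deps["tqdm"])  # enforces "tqdm>=4.27"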
| 80 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
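# Context (assuming Project Euler #65 semantics): e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...],
# so the i-th partial denominator is 2*i/3 when i % 3 == 0 and 1 otherwise. Each
# convergent numerator satisfies n_i = a_i * n_(i-1) + n_(i-2), which is exactly the
# recurrence in solution(); the result is the digit sum of the 100th numerator.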
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
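
# Minimal usage sketch (model class availability assumed from transformers):
#   from transformers import TimesformerModel
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
#   model = TimesformerModel(config)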
| 198 |
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node: holds a value and a random heap priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """
    Split the tree into two: keys smaller than `value` and keys greater or equal.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """
    Merge two trees, assuming all keys in `left` are smaller than keys in `right`,
    while keeping the heap property on priorities.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
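# Note: because priorities are drawn uniformly at random, the treap is balanced in
# expectation, so split/merge (and therefore insert/erase) take O(log n) expected time.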
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
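# Note (illustrative, not part of the original file): with the _LazyModule
# registration above, submodules are imported only on first attribute access,
# so e.g. `from transformers.models.layoutlmv3 import LayoutLMv3Config` pulls
# in only the configuration module, not the torch/TF modeling code.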
'''simple docstring'''
import os
def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
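# Worked example from the problem statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in the sorted list,
# contributes 938 * 53 = 49714 to the total.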
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum through the matrix, moving up, down, and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word with its letters sorted, used as an anagram signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
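# Quick check (result depends on the bundled words.txt): signature("pots")
# is "opst", so anagram("pots") returns every listed word with that
# signature, e.g. ["opts", "post", "pots", "spot", "stop", "tops"].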
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
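# Worked step behind the update above: for the sigmoid model, the gradient of
# the mean cross-entropy loss is dJ/dtheta = x.T @ (sigmoid(x @ theta) - y) / m,
# which is exactly what np.dot(x.T, h - y) / y.size computes.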
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it and is left unnormalized.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs from sequences by adding the [CLS] and [SEP] special tokens."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create token type IDs: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
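# Usage sketch (illustrative; assumes the "albert-base-v2" checkpoint is
# reachable on the Hugging Face Hub):
#
# tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
# encoding = tokenizer("A sentence to tokenize.")
# print(encoding["input_ids"])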
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling random points in the unit square."""
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
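# Illustrative runs (not part of the original module); the estimates tighten
# as the iteration count grows:
#
# pi_estimator(100_000)
# area_under_line_estimator_check(100_000)
# pi_estimator_using_area_under_curve(100_000)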
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
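# Usage sketch (illustrative, not part of the original file):
#
# config = TimeSeriesTransformerConfig(prediction_length=24)
# print(config.context_length)  # defaults to prediction_length, i.e. 24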
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap the feature size so log-mel targets pad correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
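# Illustrative call pattern (assumes a checkpoint such as
# "microsoft/speecht5_tts" that ships this processor):
#
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello, world", return_tensors="pt")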
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
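# Worked check: the 10th convergent of e is 1457/536, so
# solution(10) == 1 + 4 + 5 + 7 == 17, matching the problem statement.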
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Count how often each uppercase letter appears in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of the message ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # ties are broken by reverse ETAOIN order
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score 0-12: how well the message's letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
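# Quick check: the maximum possible english_freq_match_score is 12 (six most
# common plus six least common letters); typical English plaintext scores near
# that maximum, while uniformly random strings score much lower.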
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Creates a TFGPT2Tokenizer from an existing GPT2Tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates a TFGPT2Tokenizer from a pretrained GPT-2 tokenizer."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Creates a TFGPT2Tokenizer from this layer's config."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Returns the number of ways n pence can be made from the eight coins."""
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
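# Small worked check: restricted to 1p and 2p coins, two_pence(3) == 2,
# counting 1+1+1 and 1+2; solution(200) counts every combination of the
# eight coins that totals 200p.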
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Computes the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci term with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
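# Quick check: 144 is the first Fibonacci term with three digits, and it is
# the 12th term here, so fibonacci_digits_index(3) == 12.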
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns n plus every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
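# Example from the problem statement: 3797 is truncatable because removing
# digits from the left (797, 97, 7) and from the right (379, 37, 3) always
# leaves a prime.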
"""simple docstring"""
def count_inversions_bf(arr):
    """Counts inversions with a naive O(n^2) double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer; also returns the sorted array."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Counts inversions across two sorted arrays while merging them."""
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
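# Quick check: count_inversions_bf([3, 2, 1]) == 3 for the pairs (3, 2),
# (3, 1), (2, 1); the divide-and-conquer version also returns the sorted
# array: count_inversions_recursive([3, 2, 1]) == ([1, 2, 3], 3).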
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
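# Example invocation (hypothetical script path and file names):
# python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#     --token_counts_dump data/token_counts.bert-base-uncased.pickle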
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    Builds the tree
        1
       / \
      2   3
     / \
    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> Sequence[Node | None]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> Sequence[Node | None]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> Sequence[Node | None]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
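# Expected output for make_tree() above: inorder -> [4, 2, 5, 1, 3],
# preorder -> [1, 2, 4, 5, 3], postorder -> [4, 5, 2, 3, 1],
# level_order -> [1, 2, 3, 4, 5], zigzag -> [[1], [3, 2], [4, 5]].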
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors,
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and
        # instead pad it with zeros for these additional tokens.
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # with a learned "prd" token, the prediction is read from the last position
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        # Undo the normalization applied to the CLIP image embeddings during training.
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
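    # Usage sketch (added; illustrative, not part of the original file). Assuming this
    # class is the diffusers-style PriorTransformer, a single denoising step looks
    # roughly like:
    #
    #   import torch
    #   prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4,
    #                            num_layers=2, embedding_dim=8, num_embeddings=7)
    #   latent = torch.randn(1, 8)          # noisy CLIP image embedding
    #   proj = torch.randn(1, 8)            # pooled CLIP text embedding
    #   text_tokens = torch.randn(1, 7, 8)  # per-token text encoder states
    #   out = prior(latent, timestep=1, proj_embedding=proj,
    #               encoder_hidden_states=text_tokens)
    #   out.predicted_image_embedding.shape  # -> torch.Size([1, 8])
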
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    """Abstract base class that every CLI subcommand must implement."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this subcommand and its arguments to the given (sub)parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the subcommand."""
        raise NotImplementedError()
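
# Example (added; illustrative and not part of the original file): a minimal concrete
# subcommand built on the abstract base above. In practice `parser` is the object
# returned by `ArgumentParser.add_subparsers()`, whose `add_parser` method creates
# the subcommand parser.
class EnvCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser):
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        import platform

        print(f"Python: {platform.python_version()} on {platform.platform()}")
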
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> str:
SCREAMING_SNAKE_CASE = int(SCREAMING_SNAKE_CASE_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(SCREAMING_SNAKE_CASE_ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> str:
SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ ).strip()
if not number:
raise ValueError('No input value was provided' )
SCREAMING_SNAKE_CASE = '-' if number.startswith('-' ) else ''
SCREAMING_SNAKE_CASE = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
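    # Illustrative spot checks (added; not part of the original file):
    assert main("27") == "0b11011"
    assert main("-32") == "-0b100000"
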
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Solve the N queens problem with depth-first search and backtracking."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board, there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the column value is not already in the current board
        # (possible_board), because a repeated value would mean a vertical collision.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Generate and print every solution for an n x n board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
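    # Reference counts (added note): the 4-queens puzzle has exactly 2 solutions and
    # the classic 8-queens puzzle has 92, a quick sanity check for this solver.
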
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the data, the parent pointer, and the rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper for the union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a minimum spanning tree (MST) of the graph
        # collect each undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the lightest edge that joins two components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
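
# Example (added; illustrative and not part of the original file): build a small
# weighted graph and extract its minimum spanning tree with Kruskal's algorithm.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)  # heaviest edge in the cycle, so it is left out of the MST
    mst = g.kruskal()
    print(mst.connections)  # expected: {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}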