from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
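A minimal usage sketch for the config class above. The top-level `transformers` import is an assumption that only holds with a version of the package that still ships the (deprecated) RetriBERT model; the overridden values are arbitrary.

```python
# Sketch: instantiate the config and override two defaults.
from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=256, share_encoders=False)
print(config.model_type)      # "retribert"
print(config.projection_dim)  # 256
```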
"""Read compressed files as single-file fsspec filesystems (from the datasets library)."""
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
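A hedged end-to-end sketch of how these filesystems are used. The registration step mirrors what the datasets library does at import time; the file name is hypothetical and created on the spot.

```python
# Sketch: read a local gzip file through one of the filesystems defined above.
import gzip

import fsspec

# The classes only take effect once registered with fsspec.
fsspec.register_implementation(GzipFileSystem.protocol, GzipFileSystem, clobber=True)

# Hypothetical local file, created just for the demo.
with gzip.open("data.txt.gz", "wt") as f:
    f.write("hello world\n")

fs = fsspec.filesystem("gzip", fo="data.txt.gz")
print(fs.cat("data.txt"))  # b"hello world\n"
```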
"""Fast tokenization class for Funnel Transformer."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
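One Funnel-specific quirk worth making concrete: `create_token_type_ids_from_sequences` gives the [CLS] token its own token type (2) instead of the usual 0. The values below are computed by hand from the method body, not from a live tokenizer run, and the ids are hypothetical.

```python
# Token-type layout for a sentence pair, per the method above.
cls_token_type_id = 2
token_ids_0 = [10, 11]      # hypothetical ids for sentence A
token_ids_1 = [20, 21, 22]  # hypothetical ids for sentence B

# [CLS] A A [SEP] -> [2, 0, 0, 0]; B B B [SEP] -> [1, 1, 1, 1]
type_ids = [cls_token_type_id] + [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)
assert type_ids == [2, 0, 0, 0, 1, 1, 1, 1]
```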
"""Divide-and-conquer search for the maximum element of a list."""
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
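The obfuscation stripped the doctest examples that `testmod` would otherwise exercise; a couple of hand-written spot checks, assuming they are appended to the same file:

```python
# Spot checks for find_max; a whole-list call uses indices [0, len - 1].
nums = [2, 9, -1, 4, 9]
assert find_max(nums, 0, len(nums) - 1) == 9
assert find_max(nums, 0, 0) == 2  # single-element range
```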
"""Tests for BridgeTowerImageProcessor."""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Mirrors the shortest-edge resize (capped at 1333/800 * size) and the
        # size_divisor rounding performed by BridgeTowerImageProcessor.
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
"""VQModel: a VAE with a vector-quantized latent space (diffusers)."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
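A rough round-trip sketch, assuming `diffusers` is installed. The tiny configuration values are illustrative only, and the claim that spatial size is preserved rests on the single-block config not downsampling.

```python
# Sketch: encode/decode round trip through a deliberately tiny VQModel.
import torch

from diffusers import VQModel

model = VQModel(block_out_channels=(32,), num_vq_embeddings=64, norm_num_groups=32)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    out = model(x).sample
print(out.shape)  # expected: torch.Size([1, 3, 32, 32])
```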
"""Stale-bot script for the huggingface/diffusers issue tracker."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # get_labels() yields Label objects, so compare by name rather than
            # testing string membership against the objects themselves.
            any(label.name.lower() == "stale" for label in issue.get_labels())
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
"""Project Euler: find the grid area whose rectangle count is closest to the target."""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
"""DDIM pipeline that adds noise to an input image before denoising it."""
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = image.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(image, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
"""Lazy import structure for the VisionTextDualEncoder model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
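The `_LazyModule` indirection defers the heavy framework imports until an attribute is first touched. Below is a generic, dependency-free sketch of the same idea using PEP 562 module-level `__getattr__`; the module and attribute names are hypothetical, and this is not the transformers implementation.

```python
# my_package/__init__.py — lazy attribute loading via PEP 562 (Python >= 3.7).
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Imported only on first access, e.g. `from my_package import HeavyClass`.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```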
"""Utilities to split, merge and shuffle generator kwargs across shards."""
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Earlier groups absorb the remainder, one extra shard each.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # We must shuffle all the lists, and lists of the same size must use the same
    # shuffling, so that entangled lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
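To make the splitting behavior concrete, here is a small worked example; the values are computed by hand from the functions above, with made-up kwargs.

```python
# 10 shards split across at most 3 jobs: the remainder goes to the first group.
assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]

# Lists are sharded; everything else is copied as-is into each job's kwargs.
gen_kwargs = {"filepaths": ["f0", "f1", "f2", "f3"], "encoding": "utf-8"}
splits = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
assert splits == [
    {"filepaths": ["f0", "f1"], "encoding": "utf-8"},
    {"filepaths": ["f2", "f3"], "encoding": "utf-8"},
]
assert _merge_gen_kwargs(splits) == gen_kwargs
```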
"""SQuAD dataset for question answering (PyTorch)."""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
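A minimal construction sketch. The data directory is hypothetical and must contain the SQuAD JSON files; the slow tokenizer is requested because `squad_convert_examples_to_features` was written against the slow tokenizer API.

```python
# Sketch: build a training dataset. "path/to/squad" is a hypothetical directory
# holding train-v1.1.json / dev-v1.1.json (set version_2_with_negative for v2.0).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
data_args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
print(len(train_dataset))
```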
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack, memoized with -1 for "not computed"
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # after the loops w_ == w, so this is the optimal value for capacity w
    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
"""Convert a TensorFlow BERT checkpoint to PyTorch."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""Prim's minimum spanning tree algorithm."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    # In the original file this function carries the doctest examples
    # exercised by testmod() below.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
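A small self-contained check, added here as a sketch; the graph and weights are made up. Vertices get 0-based ids so that `connect` can address them with 1-based labels, and the MST of this triangle keeps the two cheapest edges.

```python
# Triangle graph: edges (1,2) weight 1, (2,3) weight 2, (1,3) weight 3.
graph = [Vertex(i) for i in range(3)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)

print(prim(graph[:], graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph[:], graph[0])))  # same MST from the heap variant
```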
"""Bucket sort."""
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
"""Tests for PerceiverTokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                toks.append((i, tok))
            except UnicodeDecodeError:
                # skip ids that are not valid utf-8 on their own
                pass

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a fixed vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized for a byte-level tokenizer
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
"""Bisection method for root finding."""


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # By the Bolzano theorem, a sign change between a and b guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
"""Project Euler 44: find pentagonal numbers whose sum and difference are pentagonal."""


def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path, 'w' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """
    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe) -> None:
    global process_lock
    # we perform 10 swaps because the sample list has 10 elements; after n swaps
    # a list of n values is guaranteed to be sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr) -> list:
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main() -> None:
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
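    # Hedged note (added): multiprocessing.Pipe() returns a pair of Connection
    # objects, which is why the code above indexes [1] for the sending end and
    # [0] for the receiving end of every pipe.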
| 332 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=False, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=33, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _lowerCamelCase ( self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ) -> Optional[int]:
return EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]:
        model = EsmModel(config=lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, attention_mask=lowercase_ )
        result = model(lowercase_ )
        result = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
        model = EsmForMaskedLM(config=lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = False
snake_case_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = ()
snake_case_ = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def _lowerCamelCase ( self ) -> str:
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37 )
def _lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def _lowerCamelCase ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def _lowerCamelCase ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def _lowerCamelCase ( self ) -> Dict:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(model )
def _lowerCamelCase ( self ) -> Optional[Any]:
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=lowercase_ )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx )
        self.assertEqual(position_ids.shape, expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions ) ) )
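        # Hedged illustration (added): with padding_idx = 1, input ids
        # [[12, 31, 13, 1]] map to position ids [[2, 3, 4, 1]] -- real tokens get
        # consecutive positions offset past the padding index, pads keep it.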
def _lowerCamelCase ( self ) -> List[Any]:
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=lowercase_ )
        inputs_embeds = torch.empty(2, 4, 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape, expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCamelCase ( self ) -> Tuple:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCamelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ) -> Any:
pass
@require_torch
class lowerCamelCase ( __lowerCAmelCase ):
@slow
def _lowerCamelCase ( self ) -> str:
with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape, expected_shape )
            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
| 360 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
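# Hedged sanity note (added): a sequence of n distinct items has n! orderings,
# so the first call above prints 4! = 24 permutations and the second 3! = 6.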
| 332 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, lowercase_, lowercase_, lowercase_, lowercase_ = None, ) -> List[str]:
super().__init__()
self.register_modules(transformer=lowercase_, vae=lowercase_, scheduler=lowercase_ )
# create a imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(',' ):
                    self.labels[label.lstrip()] = int(key )
        self.labels = dict(sorted(self.labels.items() ) )
    def _lowerCamelCase ( self, label ) -> List[int]:
        if not isinstance(label, list ):
            label = list(label )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self, lowercase_, lowercase_ = 4.0, lowercase_ = None, lowercase_ = 50, lowercase_ = "pil", lowercase_ = True, ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(lowercase_ )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=lowercase_, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(lowercase_, device=self.device ).reshape(-1 )
        class_null = torch.tensor([1000] * batch_size, device=self.device )
        class_labels_input = torch.cat([class_labels, class_null], 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase_ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half], dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps ) // 2, dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0 )
                noise_pred = torch.cat([eps, rest], dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0, 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
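# Hedged usage sketch (added; the public class name `DiTPipeline` and the
# checkpoint id are assumptions, not taken from this file):
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.get_label_ids(["white shark", "umbrella"])
# images = pipe(class_labels=class_ids, num_inference_steps=25).images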
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
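# Hedged usage sketch (added; uses the standard `transformers` entry points
# rather than the local class names above):
# from transformers import RobertaConfig, RobertaModel
# config = RobertaConfig(num_hidden_layers=6)  # smaller than the 12-layer default
# model = RobertaModel(config)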
| 332 | 0 |
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    # Sieve of Eratosthenes: primality_list[i] == 0 means i is (still) prime
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"{solution() = }")
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer, tokenizer_component, new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def _lowerCamelCase ( self, value ) -> Any:
        value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
        self._mask_token = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(lowercase_, name=lowercase_ )
        return tuple(files )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def _lowerCamelCase ( self ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> int:
        model = OpenAIGPTModel(config=lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, token_type_ids=lowercase_, head_mask=lowercase_ )
        result = model(lowercase_, token_type_ids=lowercase_ )
        result = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Optional[int]:
        model = OpenAIGPTLMHeadModel(lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> List[Any]:
        model = OpenAIGPTDoubleHeadsModel(lowercase_ )
        model.to(torch_device )
        model.eval()
        result = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> int:
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(lowercase_ )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        result = model(lowercase_, token_type_ids=lowercase_, labels=sequence_labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case_ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
def _lowerCamelCase ( self ) -> int:
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def _lowerCamelCase ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def _lowerCamelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def _lowerCamelCase ( self ) -> List[str]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(model )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ) -> str:
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False )
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids )
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx ):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt ):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx ):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder ):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1_0_0_0
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    # NOTE: assumes an image processor whose `size` is a dict keyed by "shortest_edge"
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
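    # Hedged CLI sketch (added; the script filename and paths are illustrative):
    #   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24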
| 332 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''ibert'''
def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=False, lowercase_="none", **lowercase_, ) -> str:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
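    # Hedged usage sketch (added; paths are illustrative): running
    #   python minify.py ./data ./data_mini 5
    # copies the first five lines of every file in ./data into a same-named
    # file under ./data_mini, since fire.Fire maps CLI args onto minify().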
| 332 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
snake_case_ = FunnelTokenizer
snake_case_ = FunnelTokenizerFast
snake_case_ = True
snake_case_ = True
def _lowerCamelCase ( self ) -> str:
super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[int]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Any:
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self ) -> Dict:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [7, 4, 5, 10, 8, 9] )
def _lowerCamelCase ( self ) -> Any:
        tokenizers = self.get_tokenizers(do_lower_case=lowercase_ )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len + [1] * sentence_len )
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path ):
    inspect_dataset(path, tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path ):
    inspect_metric(path, tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits ):
    info = get_dataset_config_info(path, config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path, config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos_with_config_name(path, expected_config, expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path, config_name=config_name )
| 332 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case_ = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''text''': Value('''string''' )} )
snake_case_ = Features({'''labels''': ClassLabel} )
snake_case_ = '''text'''
snake_case_ = '''labels'''
def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column], lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
snake_case = copy.deepcopy(self )
snake_case = self.label_schema.copy()
snake_case = features[self.label_column]
snake_case = label_schema
return task_template
@property
def _lowerCamelCase ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
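# Usage sketch (added illustration; the Features below are assumptions): calling
# the method above with
#   Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# returns a copy of the template whose label_schema carries the dataset's own
# ClassLabel in place of the generic placeholder.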
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger("transformers.models.encodec")
lowerCAmelCase_ = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
lowerCAmelCase_ = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
lowerCAmelCase_ = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
lowerCAmelCase_ = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
lowerCAmelCase_ = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
lowerCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __magic_name__ ( A , A , A , A , A ) -> int:
for attribute in key.split('.' ):
snake_case = getattr(A , A )
if weight_type is not None:
snake_case = getattr(A , A ).shape
else:
snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
elif weight_type == "running_mean":
snake_case = value
elif weight_type == "running_var":
snake_case = value
elif weight_type == "num_batches_tracked":
snake_case = value
elif weight_type == "weight_ih_l0":
snake_case = value
elif weight_type == "weight_hh_l0":
snake_case = value
elif weight_type == "bias_ih_l0":
snake_case = value
elif weight_type == "bias_hh_l0":
snake_case = value
elif weight_type == "weight_ih_l1":
snake_case = value
elif weight_type == "weight_hh_l1":
snake_case = value
elif weight_type == "bias_ih_l1":
snake_case = value
elif weight_type == "bias_hh_l1":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
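# Worked example (added illustration): for key "encoder.layers.0.conv" with
# weight_type "bias", the getattr walk above reaches hf_model.encoder.layers[0].conv
# and, after the shape check, assigns `value` to its .bias tensor.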
def __magic_name__ ( A , A ) -> str:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case , snake_case = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(A , A ):
logger.info(F'''{name} was ignored''' )
continue
snake_case = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case , snake_case = key.split('.*.' )
if prefix in name and suffix in name:
snake_case = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
snake_case = True
if "*" in mapped_key:
snake_case = name.split(A )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , A )
if "weight_g" in name:
snake_case = 'weight_g'
elif "weight_v" in name:
snake_case = 'weight_v'
elif "weight_ih_l0" in name:
snake_case = 'weight_ih_l0'
elif "weight_hh_l0" in name:
snake_case = 'weight_hh_l0'
elif "bias_ih_l0" in name:
snake_case = 'bias_ih_l0'
elif "bias_hh_l0" in name:
snake_case = 'bias_hh_l0'
elif "weight_ih_l1" in name:
snake_case = 'weight_ih_l1'
elif "weight_hh_l1" in name:
snake_case = 'weight_hh_l1'
elif "bias_ih_l1" in name:
snake_case = 'bias_ih_l1'
elif "bias_hh_l1" in name:
snake_case = 'bias_hh_l1'
elif "bias" in name:
snake_case = 'bias'
elif "weight" in name:
snake_case = 'weight'
elif "running_mean" in name:
snake_case = 'running_mean'
elif "running_var" in name:
snake_case = 'running_var'
elif "num_batches_tracked" in name:
snake_case = 'num_batches_tracked'
else:
snake_case = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def __magic_name__ ( A , A , A , A=None , A=None , ) -> List[str]:
if config_path is not None:
snake_case = EncodecConfig.from_pretrained(A )
else:
snake_case = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case = [8, 5, 4, 4]
snake_case = [2.2]
snake_case = 6_4
snake_case = 3_2_0_0_0
snake_case = 2_0_4_8
snake_case = False
snake_case = False
snake_case = False
elif model_name == "encodec_48khz":
snake_case = [8, 5, 4, 2]
snake_case = [3.0, 6.0, 12.0, 24.0]
snake_case = 4_8_0_0_0
snake_case = 2
snake_case = False
snake_case = 'time_group_norm'
snake_case = True
snake_case = 1.0
snake_case = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
snake_case = EncodecModel(A )
snake_case = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(A )
snake_case = torch.load(A )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case = original_checkpoint['best_state']
recursively_load_weights(A , A , A )
model.save_pretrained(A )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(A )
model.push_to_hub(A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowerCAmelCase_ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
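# Example invocation (added illustration; all paths are placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#     --checkpoint_path encodec_24khz-d7cc33bc.th \
#     --pytorch_dump_folder_path ./encodec-24khz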
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, lowercase_, lowercase_ ) -> Tuple:
super().__init__()
self.register_modules(unet=lowercase_, scheduler=lowercase_ )
@torch.no_grad()
def __call__( self, lowercase_ = 1, lowercase_ = None, lowercase_ = 50, lowercase_ = "pil", lowercase_ = True, **lowercase_, ) -> Union[ImagePipelineOutput, Tuple]:
snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=lowercase_, )
snake_case = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case = self.unet(lowercase_, lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case = self.scheduler.step(lowercase_, lowercase_, lowercase_ ).prev_sample
snake_case = (image / 2 + 0.5).clamp(0, 1 )
snake_case = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=lowercase_ ), "This is a local test"
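# Usage sketch (added illustration; unet and scheduler stand for any compatible
# diffusers components): output, note = pipe(batch_size=1, num_inference_steps=50)
# yields output.images plus the marker string, which is how tests recognize that
# this local custom pipeline was loaded instead of a library one.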
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
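# Note (added illustration): poly_reg.fit_transform([[5.5]]) expands the scalar
# level 5.5 into [1, 5.5, 5.5**2, 5.5**3, 5.5**4], so the fitted linear model
# applies its degree-4 coefficients to produce the salary prediction above.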
| 332 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
def __magic_name__ ( A , A ) -> str:
return (preds == labels).mean()
@dataclass
class lowerCamelCase :
snake_case_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCamelCase :
snake_case_ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
snake_case_ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __magic_name__ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
# Set seed
set_seed(training_args.seed )
try:
snake_case = processors[data_args.task_name]()
snake_case = processor.get_labels()
snake_case = len(A )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(A ) -> Dict:
snake_case = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(A , p.label_ids )}
# Data collator
snake_case = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case = trainer.evaluate()
snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
results.update(A )
return results
def __magic_name__ ( A ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
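# Example invocation (added illustration; task name and paths are placeholders):
#   python run_multiple_choice.py --model_name_or_path bert-base-uncased \
#     --task_name swag --data_dir ./swag --output_dir ./out --do_train --do_eval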
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
    snake_case_ = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
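# Usage sketch (added illustration): once these classes are registered with
# fsspec under their `protocol` names, a chained URL such as
# "gzip://file.txt::http://foo.bar/file.txt.gz" opens the archive read-only and
# the ".gz" extension is stripped to derive the inner file name.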
| 332 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def __magic_name__ ( A ) -> str:
def decorator(A ):
snake_case = getattr(A , 'handle_key' , [] )
handle += [key]
setattr(A , 'handle_key' , A )
return func
return decorator
def __magic_name__ ( *A ) -> Any:
def decorator(A ):
snake_case = getattr(A , 'handle_key' , [] )
handle += keys
setattr(A , 'handle_key' , A )
return func
return decorator
class lowerCamelCase ( __lowerCAmelCase ):
def __new__( cls, lowercase_, lowercase_, lowercase_ ) -> Dict:
snake_case = super().__new__(cls, lowercase_, lowercase_, lowercase_ )
if not hasattr(lowercase_, 'key_handler' ):
setattr(lowercase_, 'key_handler', {} )
setattr(lowercase_, 'handle_input', KeyHandler.handle_input )
for value in attrs.values():
snake_case = getattr(lowercase_, 'handle_key', [] )
for key in handled_keys:
snake_case = value
return new_cls
@staticmethod
def _lowerCamelCase ( cls ) -> Union[str, Any]:
snake_case = get_character()
if char != KEYMAP["undefined"]:
snake_case = ord(lowercase_ )
snake_case = cls.key_handler.get(lowercase_ )
if handler:
snake_case = char
return handler(cls )
else:
return None
def __magic_name__ ( cls ) -> List[str]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
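# Usage sketch (added illustration; the "up" entry is an assumption about KEYMAP):
# a menu class built through the factory above maps key codes to decorated
# methods, so handle_input dispatches a keypress like KEYMAP["up"] to whichever
# method registered that code via the decorators defined at the top.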
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
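# Quick sanity check (added illustration, assuming the original nums/left/right
# signature): find_max([2, 9, 4, 7], 0, 3) == 9, since the recursion compares
# max(2, 9) from the left half against max(4, 7) from the right half.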
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = None
snake_case_ = None
lowerCAmelCase_ = namedtuple("CoinsDistribResult", "moves excess")
def __magic_name__ ( A ) -> int:
if root is None:
return 0
# Validation
def count_nodes(A ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(A ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(A ) != count_coins(A ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(A ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
snake_case , snake_case = get_distrib(node.left )
snake_case , snake_case = get_distrib(node.right )
snake_case = 1 - left_distrib_excess
snake_case = 1 - right_distrib_excess
snake_case = (
left_distrib_moves
+ right_distrib_moves
+ abs(A )
+ abs(A )
)
snake_case = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(A , A )
return get_distrib(A )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
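# Worked example (added illustration, assuming the dataclass fields are
# data/left/right): a root holding 3 coins with two empty children needs 2 moves
# (one coin pushed down each edge), so get_distrib returns moves=2, excess=1 at
# the root and the function yields 2.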
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
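# Usage sketch (added illustration): a full forward pass is
# decode(encode(x).latents).sample; decode() routes latents through the
# VectorQuantizer (unless force_not_quantize=True), the post-quant conv, and the
# Decoder, also feeding the quantized tensor to spatial norm layers when
# norm_type == "spatial".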
| 332 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''swin2sr'''
snake_case_ = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self, lowercase_=64, lowercase_=1, lowercase_=3, lowercase_=180, lowercase_=[6, 6, 6, 6, 6, 6], lowercase_=[6, 6, 6, 6, 6, 6], lowercase_=8, lowercase_=2.0, lowercase_=True, lowercase_=0.0, lowercase_=0.0, lowercase_=0.1, lowercase_="gelu", lowercase_=False, lowercase_=0.02, lowercase_=1E-5, lowercase_=2, lowercase_=1.0, lowercase_="1conv", lowercase_="pixelshuffle", **lowercase_, ) -> Union[str, Any]:
super().__init__(**lowercase_ )
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = embed_dim
snake_case = depths
snake_case = len(lowercase_ )
snake_case = num_heads
snake_case = window_size
snake_case = mlp_ratio
snake_case = qkv_bias
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = use_absolute_embeddings
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = upscale
snake_case = img_range
snake_case = resi_connection
snake_case = upsampler
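# Note (added illustration): the defaults above describe a 2x classical
# super-resolution model: 64px training patches, 180-dim embeddings, six stages
# of depth 6 with 6 attention heads each, window size 8, and a pixelshuffle
# upsampler.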
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
    # the smallest integer greater than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
snake_case = WavaVecaForSequenceClassification.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
snake_case = downstream_dict['projector.weight']
snake_case = downstream_dict['projector.bias']
snake_case = downstream_dict['model.post_net.linear.weight']
snake_case = downstream_dict['model.post_net.linear.bias']
return model
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
snake_case = WavaVecaForAudioFrameClassification.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
snake_case = downstream_dict['model.linear.weight']
snake_case = downstream_dict['model.linear.bias']
return model
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
snake_case = WavaVecaForXVector.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
snake_case = downstream_dict['connector.weight']
snake_case = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
snake_case = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
snake_case = downstream_dict['objective.W']
return model
@torch.no_grad()
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
snake_case = torch.load(UpperCamelCase_ , map_location='cpu' )
snake_case = checkpoint['Downstream']
snake_case = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
snake_case = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
snake_case = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
snake_case = convert_classification(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
elif arch.endswith('ForAudioFrameClassification' ):
snake_case = convert_diarization(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
elif arch.endswith('ForXVector' ):
snake_case = convert_xvector(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
snake_case = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(UpperCamelCase_ )
hf_model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
lowerCAmelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
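# Example invocation (added illustration; all arguments are placeholders):
#   python convert_wav2vec2_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt --model_dump_path ./converted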
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> Optional[Any]:
snake_case = len(A )
while cur > 1:
        # Find the index of the maximum value in the unsorted prefix arr[0:cur]
snake_case = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
snake_case = arr[mi::-1] + arr[mi + 1 : len(A )]
        # Reverse the first cur elements so the maximum settles at position cur - 1
snake_case = arr[cur - 1 :: -1] + arr[cur : len(A )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
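# Trace (added illustration): for [3, 1, 2] the first pass flips the maximum to
# the front (a no-op here, since 3 already leads) and then flips the first 3
# items, giving [2, 1, 3]; the second pass repeats on the first 2 items,
# yielding the sorted [1, 2, 3].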
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
        snake_case = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
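# Usage sketch (added illustration; SquadDataset stands for the class above):
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = dataset[0]  # dict of input_ids / attention_mask / token_type_ids
# The first construction tokenizes and caches features; later runs reload the
# cached tensors under the FileLock instead of re-processing the raw JSON.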
| 332 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase_ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
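# Example invocation (added illustration; paths are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model.bin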
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import subprocess
def __magic_name__ ( A , A ) -> Optional[Any]:
snake_case = []
snake_case = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
snake_case = subprocess.run(A , shell=A , stdout=subprocess.PIPE )
snake_case = output.stdout.decode('utf-8' )
snake_case = json.loads(A )
snake_case = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(A )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(A ) )
if len(A ) > 0:
snake_case = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def __magic_name__ ( A ) -> int:
return values.split(',' )
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
lowerCAmelCase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
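# Example invocation (added illustration; values are placeholders):
#   python get_runner_status.py --target_runners runner-1,runner-2 \
#     --token <github-token-with-actions-read-permission>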
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
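    # Extra check (an assumption, not in the original file): float inputs also
    # sort correctly, landing in integer-width buckets and sorted within each.
    assert bucket_sort([0.4, 1.2, 0.1, 0.3]) == [0.1, 0.3, 0.4, 1.2]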
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
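# A short note (an assumption based on the transformers lazy-import pattern, not
# stated in this file): _LazyModule defers importing the heavy torch/TF
# submodules listed in _import_structure until one of their attributes is
# first accessed, which keeps `import transformers` cheap.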
| 357 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano's theorem in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
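    # Both calls above should converge to roughly 3.162 (the positive root of
    # 10 - x*x, i.e. sqrt(10)), since each interval brackets that sign change.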
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')
    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
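    # Assumed demo (not part of the original file): a strictly diagonally
    # dominant 2x2 system whose exact solution is [1/11, 7/11] ~ [0.0909, 0.6364].
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25))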
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
# Bind the long loading-script literal above to the name the fixtures expect.
DATASET_LOADING_SCRIPT_CODE = lowerCAmelCase_
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 332 | 0 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class lowerCamelCase :
    def __init__( self, *,
        regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0, ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'''Unknown kernel: {kernel}'''
            raise ValueError(msg)
    def __linear( self, vector1: ndarray, vector2: ndarray ) -> float:
        return np.dot(vector1, vector2)
    def __rbf( self, vector1: ndarray, vector2: ndarray ) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit( self, observations, classes ) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict( self, observation: ndarray ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
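    # Assumed smoke test (not from the original file): two linearly separable
    # observations; a point on the +1 side should come out as class 1.
    observations = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
    classes = np.array([1, -1])
    svc = lowerCamelCase(kernel="linear")
    svc.fit(observations, classes)
    print(svc.predict(np.array([2.0, 2.0])))  # expected to print 1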
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe) -> None:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr) -> list:
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> None:
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
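    # Note (an observation, not from the original file): oe_process runs a fixed
    # 10 swap phases, which matches this 10-element demo; longer inputs would
    # need at least len(arr) phases to guarantee a sorted result.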
| 332 | 0 |
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name) -> None:
    iam_client = boto3.client('iam')
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=F'''{role_name}_policy_permission''', PolicyDocument=json.dumps(policy_document, indent=2), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client('iam')
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        'How do you want to authorize?', ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default')
        os.environ['AWS_PROFILE'] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`')
        aws_access_key_id = _ask_field('AWS Access Key ID: ')
        os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id
        aws_secret_access_key = _ask_field('AWS Secret Access Key: ')
        os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
    aws_region = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1')
    os.environ['AWS_DEFAULT_REGION'] = aws_region
    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?', ['Provide IAM Role name', 'Create new IAM role using credentials'], int, )
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ')
    else:
        iam_role_name = 'accelerate_sagemaker_execution_role'
        print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo?[yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    if use_dynamo:
        prefix = 'dynamo_'
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default='default', )
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    ec2_instance_query = 'Which EC2 instance type you want to use for your training?'
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default='ml.p3.2xlarge')
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            'How many machines do you want use? [1]: ', int, default=1, )
    mixed_precision = _ask_options(
        'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.')
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
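# Assumed context (not stated in this file): these are the interactive prompts
# behind `accelerate config` when the Amazon SageMaker compute environment is
# selected, and the returned SageMakerConfig is what gets serialized to the
# user's accelerate config file.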
| 360 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int], ) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
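# Note (assumption, not in the original file): the state-space tree enumerates
# every permutation exactly once, so the printed output grows as n! with the
# length of the input sequence.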
| 332 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ) -> None:
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_model( self ) -> None:
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig ( PretrainedConfig ):
    model_type = '''roberta'''
    def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 332 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def rename_key(key):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key
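# Illustration of the regex rewrite above (an assumed example, not from the
# original module): "down_blocks.0.resnets.1.weight" -> "down_blocks_0.resnets_1.weight"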
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
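# Assumed usage sketch (names are placeholders, not from this module):
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)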
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token( self, value ) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0])
        accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json'''), 'r') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = F'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(F'''epoch {epoch}:''', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F'''state_{epoch}.json'''), 'w') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--partial_train_epoch', type=int, default=None, help='If passed, the training will stop after this number of epochs.', )
    parser.add_argument(
        '--num_epochs', type=int, default=2, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
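    # Assumed invocation sketch (the script name is a placeholder):
    # accelerate launch test_checkpointing.py --output_dir ./checkpoints --num_epochs 2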
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
        default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
# Bind the long loading-script literal above to the name the fixtures expect.
DATASET_LOADING_SCRIPT_CODE = lowerCAmelCase_
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt') as file:
        for line in file:
            grid.append(line.strip('\n').split(' '))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
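    # Note (assumption): this expects the 20x20 grid.txt from Project Euler
    # problem 11 next to this file; the expected answer there is 70600674.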
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
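A minimal usage sketch of the inspection helpers exercised by the tests above; the 'squad' and 'plain_text' values mirror the parametrized cases, and network access is assumed:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names('squad')  # e.g. ['plain_text']
splits = get_dataset_split_names('squad', config_name='plain_text')
assert splits == ['train', 'validation']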
'''simple docstring'''
lowerCAmelCase_ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
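A short sketch of how the lazy structure above behaves for a consumer; importing the package stays cheap, and the torch-backed symbols are only materialized when first accessed:

from transformers.models.git import GitConfig  # lightweight: no torch-backed import yet

config = GitConfig()  # the symbol was resolved on access through _LazyModule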
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
lowerCAmelCase_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = get_test_to_tester_mapping(lowercase_ )
snake_case = get_test_to_tester_mapping(lowercase_ )
snake_case = {'BertModelTest': 'BertModelTester'}
snake_case = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = get_model_to_test_mapping(lowercase_ )
snake_case = get_model_to_test_mapping(lowercase_ )
snake_case = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> str:
snake_case = get_model_to_tester_mapping(lowercase_ )
snake_case = get_model_to_tester_mapping(lowercase_ )
snake_case = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
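A minimal inference sketch for the pipeline the tests above exercise; the checkpoint name comes from the slow test, the step count is an assumption, and a CUDA device is assumed to be available:

from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq').to('cuda')
image = pipe('teddy bear playing in the pool', num_inference_steps=50).images[0]
image.save('teddy_bear.png')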
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
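Two quick checks for the recurrence above, with the expected values worked out by hand (1 + 3 = 4, and 5 + 7 + 6 = 18):

assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18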
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __magic_name__ ( A , A = "cpu" , A = None ) -> None:
snake_case = torch.load(A , map_location=A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(A , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
snake_case = v.half()
if save_path is None: # overwrite src_path
snake_case = src_path
torch.save(A , A )
if __name__ == "__main__":
fire.Fire(convert)
| 370 |
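How the converter above is invoked: `fire` maps the function signature onto command-line flags, so the positional argument is the checkpoint path. The file names below are placeholders:

# From the shell (the script file name is hypothetical):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Or directly from Python:
convert('pytorch_model.bin', save_path='pytorch_model.fp16.bin')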
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Polynomial Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 332 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=14, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=4, lowercase_=4, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=0.02, ) -> Optional[int]:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = rotary_dim
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = initializer_range
snake_case = None
snake_case = vocab_size - 1
snake_case = vocab_size - 1
snake_case = vocab_size - 1
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = GPTJConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowercase_, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
return (config, input_ids, input_mask)
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Dict:
snake_case = 20
snake_case = model_class_name(lowercase_ )
snake_case = model.init_cache(input_ids.shape[0], lowercase_ )
snake_case = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='i4' )
snake_case = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case = model(
input_ids[:, :-1], attention_mask=lowercase_, past_key_values=lowercase_, position_ids=lowercase_, )
snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' )
snake_case = model(
input_ids[:, -1:], attention_mask=lowercase_, past_key_values=outputs_cache.past_key_values, position_ids=lowercase_, )
snake_case = model(lowercase_ )
snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Tuple:
snake_case = 20
snake_case = model_class_name(lowercase_ )
snake_case = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
snake_case = model.init_cache(input_ids.shape[0], lowercase_ )
snake_case = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case = model(
input_ids[:, :-1], attention_mask=lowercase_, past_key_values=lowercase_, position_ids=lowercase_, )
snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' )
snake_case = model(
input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowercase_, position_ids=lowercase_, )
snake_case = model(lowercase_, attention_mask=lowercase_ )
snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
@require_flax
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _lowerCamelCase ( self ) -> Any:
snake_case = FlaxGPTJModelTester(self )
def _lowerCamelCase ( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowercase_, lowercase_, lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowercase_, lowercase_, lowercase_, lowercase_ )
@tooslow
def _lowerCamelCase ( self ) -> str:
snake_case = GPTaTokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left' )
snake_case = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=lowercase_, truncation=lowercase_ )
snake_case = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
snake_case = False
snake_case = model.config.eos_token_id
snake_case = jax.jit(model.generate )
snake_case = jit_generate(
inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id ).sequences
snake_case = tokenizer.batch_decode(lowercase_, skip_special_tokens=lowercase_ )
snake_case = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowercase_, lowercase_ )
@is_pt_flax_cross_test
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case = self._prepare_for_class(lowercase_, lowercase_ )
snake_case = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case = getattr(lowercase_, lowercase_ )
snake_case , snake_case = pt_inputs['input_ids'].shape
snake_case = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
snake_case = 0
snake_case = 1
snake_case = 0
snake_case = 1
snake_case = pt_model_class(lowercase_ ).eval()
snake_case = model_class(lowercase_, dtype=jnp.floataa )
snake_case = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowercase_ )
snake_case = fx_state
with torch.no_grad():
snake_case = pt_model(**lowercase_ ).to_tuple()
snake_case = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowercase_, lowercase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
snake_case = model_class.from_pretrained(lowercase_, from_pt=lowercase_ )
snake_case = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowercase_, lowercase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2 )
@is_pt_flax_cross_test
def _lowerCamelCase ( self ) -> Dict:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case = self._prepare_for_class(lowercase_, lowercase_ )
snake_case = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case = getattr(lowercase_, lowercase_ )
snake_case = pt_model_class(lowercase_ ).eval()
snake_case = model_class(lowercase_, dtype=jnp.floataa )
snake_case = load_flax_weights_in_pytorch_model(lowercase_, fx_model.params )
snake_case , snake_case = pt_inputs['input_ids'].shape
snake_case = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
snake_case = 0
snake_case = 1
snake_case = 0
snake_case = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case = pt_model(**lowercase_ ).to_tuple()
snake_case = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowercase_, lowercase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
snake_case = pt_model_class.from_pretrained(lowercase_, from_flax=lowercase_ )
with torch.no_grad():
snake_case = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowercase_, lowercase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 )
@tooslow
def _lowerCamelCase ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
snake_case = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
| 371 |
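A sketch of the generation path the @tooslow test above exercises, outside the test harness; the checkpoint download is several GB, and `GPT2Tokenizer` is assumed to be what the mangled `GPTaTokenizer` import refers to:

from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left')
model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B')
inputs = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=True, truncation=True)
sequences = model.generate(
    inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))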
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
| 332 | 0 |
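A usage sketch for the compression filesystems above, assuming they have been registered with fsspec (`datasets` registers them on import); the chained URL reuses the placeholder from the protocol comment in the base class:

import fsspec

with fsspec.open('gzip://file.txt::http://foo.bar/file.txt.gz', 'rb') as f:
    data = f.read()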
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
def __init__( self ) -> Dict:
snake_case = [
[],
[],
[],
]
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> None:
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(lowercase_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def _lowerCamelCase ( self ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ) -> str:
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
def __init__( self ) -> str:
snake_case = []
def _lowerCamelCase ( self, lowercase_ ) -> None:
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(lowercase_ )
def _lowerCamelCase ( self ) -> int:
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
snake_case = min(self.queue )
self.queue.remove(lowercase_ )
return data
def __str__( self ) -> str:
return str(self.queue )
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 350 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums, left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCAmelCase_ = "facebook/wmt19-en-de"
lowerCAmelCase_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCAmelCase_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
lowerCAmelCase_ = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
| 332 | 0 |
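A round-trip sketch for the VQ autoencoder above, which matches diffusers' public `VQModel`; the default config and the 32x32 sample shape are assumptions:

import torch
from diffusers import VQModel

model = VQModel()  # defaults: 3-channel input, a single down/up block
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    reconstruction = model(sample).sample  # encode -> quantize -> decode
assert reconstruction.shape == sample.shape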
'''simple docstring'''
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=13, lowercase_=32, lowercase_=3, lowercase_=4, lowercase_=[10, 20, 30, 40], lowercase_=[2, 2, 3, 2], lowercase_=True, lowercase_=True, lowercase_=37, lowercase_="gelu", lowercase_=10, lowercase_=0.02, lowercase_=["stage2", "stage3", "stage4"], lowercase_=3, lowercase_=None, ) -> Union[str, Any]:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = num_channels
snake_case = num_stages
snake_case = hidden_sizes
snake_case = depths
snake_case = is_training
snake_case = use_labels
snake_case = intermediate_size
snake_case = hidden_act
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = out_features
snake_case = num_labels
snake_case = scope
snake_case = num_stages
def _lowerCamelCase ( self ) -> str:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ) -> int:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def _lowerCamelCase ( self ) -> Dict:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowercase_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowercase_, loss_ignore_index=255, num_labels=self.num_labels, )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> List[str]:
snake_case = UperNetForSemanticSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCamelCase ( self ) -> str:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case_ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self ) -> str:
snake_case = UperNetModelTester(self )
snake_case = ConfigTester(self, config_class=lowercase_, has_text_modality=lowercase_, hidden_size=37 )
def _lowerCamelCase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ) -> Any:
return
def _lowerCamelCase ( self ) -> int:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowercase_ )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1], lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def _lowerCamelCase ( self ) -> List[str]:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def _lowerCamelCase ( self ) -> int:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _lowerCamelCase ( self ) -> Any:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _lowerCamelCase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
def _lowerCamelCase ( self ) -> Optional[int]:
def check_hidden_states_output(lowercase_, lowercase_, lowercase_ ):
snake_case = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowercase_, lowercase_ ) )
snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(lowercase_, lowercase_, lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(lowercase_, lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> Any:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = _config_zero_init(lowercase_ )
snake_case = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
snake_case = model_class(config=lowercase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='UperNet does not have tied weights' )
def _lowerCamelCase ( self ) -> Optional[int]:
pass
@slow
def _lowerCamelCase ( self ) -> Optional[Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = UperNetForSemanticSegmentation.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __magic_name__ ( ) -> str:
snake_case = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
snake_case = Image.open(UpperCamelCase_ ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
snake_case = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
snake_case = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(lowercase_ )
snake_case = prepare_img()
snake_case = processor(images=lowercase_, return_tensors='pt' ).to(lowercase_ )
with torch.no_grad():
snake_case = model(**lowercase_ )
snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowercase_ )
snake_case = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowercase_, atol=1E-4 ) )
def _lowerCamelCase ( self ) -> Dict:
snake_case = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
snake_case = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(lowercase_ )
snake_case = prepare_img()
snake_case = processor(images=lowercase_, return_tensors='pt' ).to(lowercase_ )
with torch.no_grad():
snake_case = model(**lowercase_ )
snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowercase_ )
snake_case = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowercase_, atol=1E-4 ) )
| 353 |
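An inference sketch mirroring the slow integration tests above; the checkpoint and the 512x512 logits shape come from those tests, while the image path is a placeholder:

from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
image = Image.open('scene.jpg')  # placeholder path
inputs = processor(images=image, return_tensors='pt')
logits = model(**inputs).logits  # shape (1, num_labels, 512, 512)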
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative when deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = int(input("Expected value: "))
lowerCAmelCase_ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
                mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
        self.mode = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset', None)
                self.examples = self.old_features.get('examples', None)
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 0 |
def __magic_name__ ( A ) -> list:
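    # Bead sort ("gravity sort"): on each pass, any "bead" column taller than its
    # right neighbour sheds the difference onto it, so values migrate into
    # ascending order after at most len(sequence) passes.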
if any(not isinstance(A , A ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(A ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(A , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( A ) -> List[Any]:
snake_case = 3_8_4
if "tiny" in model_name:
snake_case = [3, 3, 9, 3]
snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
snake_case = 5_1_2
if "large" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
snake_case = 7_6_8
if "xlarge" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
snake_case = 1_0_2_4
# set label information
snake_case = 1_5_0
snake_case = 'huggingface/label-files'
snake_case = 'ade20k-id2label.json'
snake_case = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = {v: k for k, v in idalabel.items()}
snake_case = ConvNextConfig(
depths=A , hidden_sizes=A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
snake_case = UperNetConfig(
backbone_config=A , auxiliary_in_channels=A , num_labels=A , idalabel=A , labelaid=A , )
return config
def __magic_name__ ( A ) -> Dict:
snake_case = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __magic_name__ ( A , A , A ) -> int:
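    # Pop the tensor stored under the old (mmsegmentation) key and re-insert it
    # under the Hugging Face key.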
snake_case = dct.pop(A )
snake_case = val
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
snake_case = model_name_to_url[model_name]
snake_case = torch.hub.load_state_dict_from_url(A , map_location='cpu' )['state_dict']
snake_case = get_upernet_config(A )
snake_case = UperNetForSemanticSegmentation(A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
snake_case = state_dict.pop(A )
if "bn" in key:
snake_case = key.replace('bn' , 'batch_norm' )
snake_case = val
# rename keys
snake_case = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
model.load_state_dict(A )
# verify on image
snake_case = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
snake_case = Image.open(requests.get(A , stream=A ).raw ).convert('RGB' )
snake_case = SegformerImageProcessor()
snake_case = processor(A , return_tensors='pt' ).pixel_values
with torch.no_grad():
snake_case = model(A )
if model_name == "upernet-convnext-tiny":
snake_case = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
snake_case = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
snake_case = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
snake_case = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
snake_case = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCAmelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
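    # One bucket per integer offset from the minimum value; each bucket is sorted
    # individually and the buckets are concatenated in order.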
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = (DDPMScheduler,)
def _lowerCamelCase ( self, **lowercase_ ) -> List[str]:
snake_case = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def _lowerCamelCase ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_, beta_end=lowercase_ )
def _lowerCamelCase ( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def _lowerCamelCase ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def _lowerCamelCase ( self ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_, prediction_type=lowercase_, sample_max_value=lowercase_, )
def _lowerCamelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def _lowerCamelCase ( self ) -> Tuple:
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
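        # _get_variance should return the DDPM posterior variance
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) for the default
        # "fixed_small" variance type configured above.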
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _lowerCamelCase ( self ) -> int:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = len(lowercase_ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case = model(lowercase_, lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(lowercase_ ) )
snake_case = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config(prediction_type='v_prediction' )
snake_case = scheduler_class(**lowercase_ )
snake_case = len(lowercase_ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case = model(lowercase_, lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(lowercase_ ) )
snake_case = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
snake_case = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
snake_case = -1
else:
snake_case = timesteps[i + 1]
snake_case = scheduler.previous_timestep(lowercase_ )
snake_case = prev_t.item()
self.assertEqual(lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 51, 0]
with self.assertRaises(lowercase_, msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 1, 0]
snake_case = len(lowercase_ )
with self.assertRaises(lowercase_, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowercase_, timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowercase_, msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''', ):
scheduler.set_timesteps(timesteps=lowercase_ )
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
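    # f(x) = 10 - x^2, whose positive root sqrt(10) ≈ 3.162 is what the bisection
    # below converges to for both demo brackets.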
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
    # Bolzano's theorem (intermediate value theorem): a root is guaranteed between a and b only if f(a) and f(b) have opposite signs
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase_ = 2_5_0_0_0_4
lowerCAmelCase_ = 2_5_0_0_2_0
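# Fairseq language-code ids in the MBart-50 vocabulary: en_XX and ro_RO.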
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
snake_case_ = MBartaaTokenizer
snake_case_ = MBartaaTokenizerFast
snake_case_ = True
snake_case_ = True
def _lowerCamelCase ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = MBartaaTokenizer(lowercase_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ) -> str:
snake_case = '<s>'
snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ), lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> List[str]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<s>' )
self.assertEqual(vocab_keys[1], '<pad>' )
self.assertEqual(vocab_keys[-1], '<mask>' )
self.assertEqual(len(lowercase_ ), 1054 )
def _lowerCamelCase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size, 1054 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = MBartaaTokenizer(lowercase_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=lowercase_ )
snake_case = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
snake_case = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
snake_case = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
def _lowerCamelCase ( self ) -> str:
# fmt: off
snake_case = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_, model_name='facebook/mbart-large-50', revision='d3913889c59cd5c9e456b269c376325eabad57e2', )
def _lowerCamelCase ( self ) -> Dict:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = self.rust_tokenizer_class.from_pretrained(lowercase_, **lowercase_ )
snake_case = self.tokenizer_class.from_pretrained(lowercase_, **lowercase_ )
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(lowercase_ )
snake_case = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
snake_case = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_, lowercase_ )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(lowercase_ )
snake_case = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ )
snake_case = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_, lowercase_ )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(lowercase_ )
snake_case = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ )
snake_case = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(lowercase_ )
snake_case = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
snake_case_ = '''facebook/mbart-large-50-one-to-many-mmt'''
snake_case_ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
snake_case_ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
snake_case_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def _lowerCamelCase ( cls ) -> str:
snake_case = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO' )
snake_case = 1
return cls
def _lowerCamelCase ( self ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'], 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'], 250038 )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowercase_ )
def _lowerCamelCase ( self ) -> List[Any]:
self.assertIn(lowercase_, self.tokenizer.all_special_ids )
snake_case = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
snake_case = self.tokenizer.decode(lowercase_, skip_special_tokens=lowercase_ )
snake_case = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_, lowercase_ )
self.assertNotIn(self.tokenizer.eos_token, lowercase_ )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0], lowercase_ )
snake_case = 10
snake_case = self.tokenizer(lowercase_, max_length=lowercase_, truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[0], lowercase_ )
self.assertEqual(ids[-1], 2 )
self.assertEqual(len(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ), [250053, 250001] )
def _lowerCamelCase ( self ) -> int:
snake_case = tempfile.mkdtemp()
snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
snake_case = MBartaaTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowercase_ )
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowercase_, return_tensors='pt' )
snake_case = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=len(self.expected_src_tokens ), return_tensors='pt', )
snake_case = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_, lowercase_ )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowercase_ )
self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.tokenizer(self.src_text, padding=lowercase_, truncation=lowercase_, max_length=3, return_tensors='pt' )
snake_case = self.tokenizer(
text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=10, return_tensors='pt' )
snake_case = targets['input_ids']
snake_case = shift_tokens_right(lowercase_, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.tokenizer._build_translation_inputs(
'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowercase_ ), {
# en_XX, A, test, EOS
'input_ids': [[250004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
}, )
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
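    # Write the dummy loading script to tmp_path/datasets/<script_name>/<script_name>.py
    # and return its path for the test to load.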
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase_ = ["text", "image", "audio"]
def __magic_name__ ( A ) -> Optional[int]:
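    # Build one dummy input per declared type ("text", "image", "audio");
    # a nested list of types is resolved recursively.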
snake_case = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(A , A ):
inputs.append(create_inputs(A ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( A ) -> int:
snake_case = []
for output in outputs:
if isinstance(A , (str, AgentText) ):
output_types.append('text' )
elif isinstance(A , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(A , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCamelCase :
def _lowerCamelCase ( self ) -> Any:
self.assertTrue(hasattr(self.tool, 'inputs' ) )
self.assertTrue(hasattr(self.tool, 'outputs' ) )
snake_case = self.tool.inputs
for _input in inputs:
if isinstance(_input, lowercase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowerCamelCase ( self ) -> List[str]:
snake_case = create_inputs(self.tool.inputs )
snake_case = self.tool(*lowercase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case = [outputs]
self.assertListEqual(output_types(lowercase_ ), self.tool.outputs )
def _lowerCamelCase ( self ) -> Optional[int]:
self.assertTrue(hasattr(self.tool, 'description' ) )
self.assertTrue(hasattr(self.tool, 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def _lowerCamelCase ( self ) -> Any:
snake_case = create_inputs(self.tool.inputs )
snake_case = self.tool(*lowercase_ )
if not isinstance(lowercase_, lowercase_ ):
snake_case = [outputs]
self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) )
for output, output_type in zip(lowercase_, self.tool.outputs ):
snake_case = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = create_inputs(self.tool.inputs )
snake_case = []
for _input, input_type in zip(lowercase_, self.tool.inputs ):
if isinstance(lowercase_, lowercase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case = self.tool(*lowercase_ )
if not isinstance(lowercase_, lowercase_ ):
snake_case = [outputs]
self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) )
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
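    # NOTE: the pass count is hardcoded to 10 to match the 10-element demo list
    # built in main(); a general version would use len(arr) passes instead.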
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import pprint
import requests
lowerCAmelCase_ = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowerCAmelCase_ = random_quotes()
pprint.pprint(response)
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
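    # Classic backtracking: at each depth pick any element not yet used, recurse
    # one level deeper, then undo the choice before trying the next candidate.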
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
    snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
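    # Collect (HF key, original key) pairs in model order; the loop below copies
    # each original tensor into the new state dict under its HF key.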
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
import os
def __magic_name__ ( A = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(A ) , A ) ) as input_file:
snake_case = [
[int(A ) for element in line.split(',' )]
for line in input_file.readlines()
]
snake_case = len(A )
snake_case = len(matrix[0] )
snake_case = [[-1 for _ in range(A )] for _ in range(A )]
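    # Column-by-column DP (moves allowed: right, up, down): seed column 0, then
    # for each later column take the cost from the left, relax the column
    # top-down, and finally relax it bottom-up.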
for i in range(A ):
snake_case = matrix[i][0]
for j in range(1 , A ):
for i in range(A ):
snake_case = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , A ):
snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"{solution() = }")
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
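# Usage sketch for the global-attention padding implemented in `_pad` above
# (public transformers API; left commented out since it downloads a checkpoint):
# from transformers import LEDTokenizerFast
# tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
# enc = tok(['short text', 'a somewhat longer piece of text'])
# enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids']]
# batch = tok.pad(enc, padding='longest')
# # padded tail positions of global_attention_mask are filled with -1, per `_pad`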
| 332 | 0 |
'''simple docstring'''
import numpy as np
def __magic_name__ ( A ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
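# Quick check that the closed form above matches numpy's built-in:
# tanh(x) = 2 / (1 + exp(-2x)) - 1.
import numpy as np
x = np.linspace(-3.0, 3.0, 7)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))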
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
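# The whole conversion above reduces to renaming checkpoint keys and copying
# tensors; the core move in miniature (key names illustrative):
import torch
old_state = {'stage0.patch_embed.proj.weight': torch.zeros(2, 2)}
rename = {
    'stage0.patch_embed.proj.weight': 'cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight'
}
new_state = {rename[old_key]: tensor for old_key, tensor in old_state.items()}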
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
class lowerCamelCase :
def __init__( self, lowercase_ = 0 ) -> Optional[int]:
snake_case = key
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> list[str]:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
snake_case = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> list[str]:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
snake_case = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> str:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
snake_case = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
snake_case = ''
for ch in content:
            ans += chr(ord(ch ) ^ key )
return ans
def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> str:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
snake_case = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
snake_case = ''
for ch in content:
ans += chr(ord(lowercase_ ) ^ key )
return ans
def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> bool:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
try:
with open(lowercase_ ) as fin, open('encrypt.out', 'w+' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowercase_, lowercase_ ) )
except OSError:
return False
return True
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> bool:
assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ )
try:
with open(lowercase_ ) as fin, open('decrypt.out', 'w+' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowercase_, lowercase_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
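# Self-contained demo of the helper above (assuming its public name is
# `minify`, as the fire.Fire call suggests): keep only the first n lines of
# every file in src and write the result into dest.
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dest:
    Path(src, 'a.txt').write_text('\n'.join(str(i) for i in range(10)))
    minify(src, dest, 3)
    assert Path(dest, 'a.txt').read_text() == '0\n1\n2'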
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
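# The _LazyModule above defers heavy imports until first attribute access. A
# hand-rolled sketch of the same idea via PEP 562 module-level __getattr__
# (the attribute-to-submodule mapping is illustrative):
import importlib
_LAZY_ATTRS = {'XmodModel': '.modeling_xmod'}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')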
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
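# The same helpers work outside tests; a typical call (commented out because it
# needs network access; the expected value matches the parametrizations above):
# from datasets import get_dataset_split_names
# get_dataset_split_names('squad', config_name='plain_text')  # ['train', 'validation']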
| 332 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = UniSpeechSatForSequenceClassification.from_pretrained(A , config=A )
snake_case = downstream_dict['projector.weight']
snake_case = downstream_dict['projector.bias']
snake_case = downstream_dict['model.post_net.linear.weight']
snake_case = downstream_dict['model.post_net.linear.bias']
return model
def __magic_name__ ( A , A , A ) -> Optional[Any]:
snake_case = UniSpeechSatForAudioFrameClassification.from_pretrained(A , config=A )
snake_case = downstream_dict['model.linear.weight']
snake_case = downstream_dict['model.linear.bias']
return model
def __magic_name__ ( A , A , A ) -> int:
snake_case = UniSpeechSatForXVector.from_pretrained(A , config=A )
snake_case = downstream_dict['connector.weight']
snake_case = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
snake_case = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
snake_case = downstream_dict['objective.W']
return model
@torch.no_grad()
def __magic_name__ ( A , A , A , A ) -> Optional[Any]:
snake_case = torch.load(A , map_location='cpu' )
snake_case = checkpoint['Downstream']
snake_case = UniSpeechSatConfig.from_pretrained(A )
snake_case = WavaVecaFeatureExtractor.from_pretrained(
A , return_attention_mask=A , do_normalize=A )
snake_case = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
snake_case = convert_classification(A , A , A )
elif arch.endswith('ForAudioFrameClassification' ):
snake_case = convert_diarization(A , A , A )
elif arch.endswith('ForXVector' ):
snake_case = convert_xvector(A , A , A )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
snake_case = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(A )
hf_model.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
lowerCAmelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __magic_name__ ( A=None ) -> Any:
if subparsers is not None:
snake_case = subparsers.add_parser('env' )
else:
snake_case = argparse.ArgumentParser('Accelerate env command' )
parser.add_argument(
'--config_file' , default=A , help='The config file to use for the default values in the launching script.' )
if subparsers is not None:
parser.set_defaults(func=A )
return parser
def __magic_name__ ( A ) -> str:
snake_case = torch.__version__
snake_case = torch.cuda.is_available()
snake_case = is_xpu_available()
snake_case = is_npu_available()
snake_case = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(A ):
snake_case = load_config_from_file(args.config_file ).to_dict()
snake_case = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
'PyTorch XPU available': str(A ),
'PyTorch NPU available': str(A ),
'System RAM': F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
}
if pt_cuda_available:
snake_case = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n' )
print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
snake_case = (
'\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(A , A )
else F'''\t{accelerate_config}'''
)
print(A )
snake_case = accelerate_config
return info
def __magic_name__ ( ) -> int:
snake_case = env_command_parser()
snake_case = parser.parse_args()
env_command(A )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
lowerCAmelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
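# A minimal sketch of what this dummy-object pattern buys you: the import always
# succeeds, but instantiating the placeholder raises a clear error when the
# optional backend is missing (class name hypothetical):
class MidiProcessorPlaceholder:
    def __init__(self, *args, **kwargs):
        raise ImportError('This class requires the optional `note_seq` backend.')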
| 332 | 0 |
'''simple docstring'''
from functools import reduce
lowerCAmelCase_ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __magic_name__ ( A = N ) -> int:
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 1_3] ) )
for i in range(len(A ) - 1_2 ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
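# The fit/transform split matters for new inputs: fit the polynomial features
# once, then only `transform` at prediction time. A tiny self-contained check:
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
X_small = np.arange(1, 7).reshape(-1, 1)
y_small = X_small.ravel() ** 2  # exactly quadratic, so degree=2 fits perfectly
features = PolynomialFeatures(degree=2)
reg = LinearRegression().fit(features.fit_transform(X_small), y_small)
assert abs(reg.predict(features.transform([[7.0]]))[0] - 49.0) < 1e-6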
| 332 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
snake_case_ = None
snake_case_ = None
class lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
snake_case_ = datasets.Audio()
snake_case_ = '''audio'''
snake_case_ = AudioFolderConfig
snake_case_ = 42 # definition at the bottom of the script
snake_case_ = AudioClassification(audio_column='''audio''' , label_column='''label''' )
lowerCAmelCase_ = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
lowerCAmelCase_ = AUDIO_EXTENSIONS
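# Loading such a folder in practice (commented out: the path is illustrative and
# the call needs the `datasets` audio extras installed):
# from datasets import load_dataset
# ds = load_dataset('audiofolder', data_dir='/path/to/folder')
# ds['train'][0]['audio']  # {'path': ..., 'array': ..., 'sampling_rate': ...}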
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
    snake_case_ = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
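# Usage sketch for the compression filesystems above, assuming they have been
# registered with fsspec (the datasets library does this on import); the part
# after `::` says where the compressed payload actually lives.
import gzip
import fsspec
with open('example.txt.gz', 'wb') as f:
    f.write(gzip.compress(b'hello'))
with fsspec.open('gzip://example.txt::file://example.txt.gz', 'rb') as f:
    assert f.read() == b'hello'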
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> list[int]:
snake_case = len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
snake_case , snake_case = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
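# Exchange sort is O(n^2): for each anchor index it swaps with any later,
# smaller element. Quick checks against the function above:
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-2, 0, -5]) == [-5, -2, 0]
assert exchange_sort([]) == []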
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
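# find_max runs in O(n) time with O(log n) recursion depth; negative indices
# work because Python resolves them when the single-element base case reads the
# list. Quick checks:
nums = [3, -1, 7, 2]
assert find_max(nums, 0, len(nums) - 1) == 7
assert find_max(nums, -len(nums), -1) == 7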
| 332 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
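# Smoke-test sketch for the autoencoder above (it corresponds to diffusers'
# VQModel; that public name is used here because the snippet's identifiers are
# placeholder-renamed): encode then decode a random image batch.
import torch
model = VQModel()  # defaults from the signature above
with torch.no_grad():
    sample = torch.randn(1, 3, 32, 32)
    latents = model.encode(sample).latents
    reconstruction = model.decode(latents).sample
assert reconstruction.shape == sample.shape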
| 332 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __magic_name__ ( A ) -> Union[str, Any]:
snake_case , snake_case = image.size
snake_case , snake_case = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
snake_case = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
snake_case = np.array(A ).astype(np.floataa ) / 255.0
snake_case = image[None].transpose(0 , 3 , 1 , 2 )
snake_case = torch.from_numpy(A )
return 2.0 * image - 1.0
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, lowercase_, lowercase_, lowercase_, ) -> Dict:
super().__init__()
self.register_modules(vqvae=lowercase_, unet=lowercase_, scheduler=lowercase_ )
@torch.no_grad()
def __call__( self, lowercase_ = None, lowercase_ = 1, lowercase_ = 100, lowercase_ = 0.0, lowercase_ = None, lowercase_ = "pil", lowercase_ = True, ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowercase_, PIL.Image.Image ):
snake_case = 1
elif isinstance(lowercase_, torch.Tensor ):
snake_case = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase_ )}''' )
if isinstance(lowercase_, PIL.Image.Image ):
snake_case = preprocess(lowercase_ )
snake_case , snake_case = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case = next(self.unet.parameters() ).dtype
snake_case = randn_tensor(lowercase_, generator=lowercase_, device=self.device, dtype=lowercase_ )
snake_case = image.to(device=self.device, dtype=lowercase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase_, device=self.device )
snake_case = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case = {}
if accepts_eta:
snake_case = eta
for t in self.progress_bar(lowercase_ ):
# concat latents and low resolution image in the channel dimension.
snake_case = torch.cat([latents, image], dim=1 )
snake_case = self.scheduler.scale_model_input(lowercase_, lowercase_ )
# predict the noise residual
snake_case = self.unet(lowercase_, lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case = self.scheduler.step(lowercase_, lowercase_, lowercase_, **lowercase_ ).prev_sample
# decode the image latents with the VQVAE
snake_case = self.vqvae.decode(lowercase_ ).sample
snake_case = torch.clamp(lowercase_, -1.0, 1.0 )
snake_case = image / 2 + 0.5
snake_case = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
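# Typical end-to-end use of the pipeline above (commented out: it downloads the
# published CompVis checkpoint; the class corresponds to diffusers'
# LDMSuperResolutionPipeline):
# from diffusers import LDMSuperResolutionPipeline
# from PIL import Image
# pipe = LDMSuperResolutionPipeline.from_pretrained('CompVis/ldm-super-resolution-4x-openimages')
# low_res = Image.open('low_res.png').convert('RGB')
# upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]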
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
    # the smallest integer greater than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
from torch import nn
def __magic_name__ ( act_fn ) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
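# Quick usage check (upstream this factory is diffusers' get_activation; that
# name is assumed here):
import torch
act = get_activation('swish')  # the first branch above maps "swish" to nn.SiLU
assert isinstance(act, nn.SiLU)
assert torch.allclose(act(torch.tensor([0.0])), torch.tensor([0.0]))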
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
import math
def __magic_name__ ( A , A ) -> int:
snake_case = len(A )
snake_case = int(math.floor(math.sqrt(A ) ) )
snake_case = 0
while arr[min(A , A ) - 1] < x:
snake_case = step
step += int(math.floor(math.sqrt(A ) ) )
if prev >= n:
return -1
while arr[prev] < x:
snake_case = prev + 1
if prev == min(A , A ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(",")]
lowerCAmelCase_ = int(input("Enter the number to be searched:\n"))
lowerCAmelCase_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"Number {x} is at index {res}")
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
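# Added usage note (hedged): upstream in transformers this module defines
# SquadDataTrainingArguments and SquadDataset; the anonymized classes above
# play those roles. A typical construction, assuming the upstream names and a
# local ./squad data directory (both assumptions, not taken from this file):
#
#     from transformers import AutoTokenizer
#     from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_set = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
#     batch = train_set[0]  # dict with input_ids / attention_mask / positions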
| 332 | 0 |
def __magic_name__ ( A = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
try:
snake_case = int(A )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
snake_case = 1
snake_case = 2
while i * i <= n:
while n % i == 0:
snake_case = i
n //= i
i += 1
if n > 1:
snake_case = n
return int(A )
if __name__ == "__main__":
print(f"{solution() = }")
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __magic_name__ ( ) -> Optional[int]:
raise RuntimeError('CUDA out of memory.' )
class lowerCamelCase ( nn.Module ):
def __init__( self ) -> Optional[int]:
super().__init__()
snake_case = nn.Linear(3, 4 )
snake_case = nn.BatchNormad(4 )
snake_case = nn.Linear(4, 5 )
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase_, [128, 64, 32, 16, 8] )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_, lowercase_ ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
snake_case , snake_case = mock_training_loop_function('hello' )
self.assertListEqual(lowercase_, [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga], [8, 'hello'] )
def _lowerCamelCase ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase_ ):
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> Any:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_, lowercase_, lowercase_ ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function(128, 'hello', 'world' )
self.assertIn('Batch size was passed into `f`', cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')`', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!', cm.exception.args[0] )
@require_cuda
def _lowerCamelCase ( self ) -> int:
snake_case = torch.cuda.memory_allocated()
snake_case = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(), lowercase_ )
snake_case = release_memory(lowercase_ )
self.assertEqual(torch.cuda.memory_allocated(), lowercase_ )
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( A ) -> tuple:
return (data["data"], data["target"])
def __magic_name__ ( A , A ) -> XGBClassifier:
snake_case = XGBClassifier()
classifier.fit(A , A )
return classifier
def __magic_name__ ( ) -> None:
snake_case = load_iris()
snake_case , snake_case = data_handling(A )
snake_case , snake_case , snake_case , snake_case = train_test_split(
A , A , test_size=0.25 )
snake_case = iris['target_names']
# Create an XGBoost Classifier from the training data
snake_case = xgboost(A , A )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
A , A , A , display_labels=A , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - A * A
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
from collections import defaultdict
def __magic_name__ ( A , A ) -> bool:
snake_case = first_str.lower().strip()
snake_case = second_str.lower().strip()
# Remove whitespace
snake_case = first_str.replace(' ' , '' )
snake_case = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(A ) != len(A ):
return False
# Default values for count should be 0
snake_case = defaultdict(A )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(A ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase_ = input("Enter the first string ").strip()
lowerCAmelCase_ = input("Enter the second string ").strip()
lowerCAmelCase_ = check_anagrams(input_a, input_b)
print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
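# Added usage sketch (hedged): pytest injects the fixtures above by parameter
# name; upstream the directory fixture is called dataset_loading_script_dir.
# A hypothetical consuming test (names are assumptions, not from this file):
#
#     from datasets import load_dataset
#
#     def test_dummy_dataset_script(dataset_loading_script_dir):
#         ds = load_dataset(dataset_loading_script_dir, split="train")
#         assert "tokens" in ds.column_names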
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> int:
snake_case = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def __magic_name__ ( A = 1_0_0 ) -> int:
snake_case = 1
snake_case = 2
for i in range(2 , max_n + 1 ):
snake_case = pre_numerator
snake_case = 2 * i // 3 if i % 3 == 0 else 1
snake_case = cur_numerator
snake_case = e_cont * pre_numerator + temp
return sum_digits(A )
if __name__ == "__main__":
print(f"{solution() = }")
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ["model.decoder.embed_positions.weights"]
def __magic_name__ ( A ) -> int:
if "emb" in name:
snake_case = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
snake_case = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
snake_case = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
snake_case = name.replace('linear1' , 'fc1' )
if "linear2" in name:
snake_case = name.replace('linear2' , 'fc2' )
if "norm1" in name:
snake_case = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
snake_case = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
snake_case = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
snake_case = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
snake_case = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def __magic_name__ ( A , A ) -> Tuple[Dict, Dict]:
snake_case = list(state_dict.keys() )
snake_case = {}
for key in keys:
snake_case = state_dict.pop(A )
snake_case = rename_keys(A )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case = val[:hidden_size, :]
snake_case = val[hidden_size : 2 * hidden_size, :]
snake_case = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case = val
else:
snake_case = val
return state_dict, enc_dec_proj_state_dict
def __magic_name__ ( A ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case = 1_0_2_4
snake_case = 2_4
snake_case = 1_6
elif checkpoint == "medium":
snake_case = 1_5_3_6
snake_case = 4_8
snake_case = 2_4
elif checkpoint == "large":
snake_case = 2_0_4_8
snake_case = 4_8
snake_case = 3_2
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
snake_case = MusicgenDecoderConfig(
hidden_size=A , ffn_dim=hidden_size * 4 , num_hidden_layers=A , num_attention_heads=A , )
return config
@torch.no_grad()
def __magic_name__ ( A , A=None , A=None , A="cpu" ) -> Any:
snake_case = MusicGen.get_pretrained(A , device=A )
snake_case = decoder_config_from_checkpoint(A )
snake_case = fairseq_model.lm.state_dict()
snake_case , snake_case = rename_state_dict(
A , hidden_size=decoder_config.hidden_size )
snake_case = TaEncoderModel.from_pretrained('t5-base' )
snake_case = EncodecModel.from_pretrained('facebook/encodec_32khz' )
snake_case = MusicgenForCausalLM(A ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case , snake_case = decoder.load_state_dict(A , strict=A )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(A )
if len(A ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(A ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
snake_case = MusicgenForConditionalGeneration(text_encoder=A , audio_encoder=A , decoder=A )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(A )
# check we can do a forward pass
snake_case = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case = model(input_ids=A , decoder_input_ids=A ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
snake_case = AutoTokenizer.from_pretrained('t5-base' )
snake_case = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
snake_case = MusicgenProcessor(feature_extractor=A , tokenizer=A )
# set the appropriate bos/pad token ids
snake_case = 2_0_4_8
snake_case = 2_0_4_8
# set other default generation config params
snake_case = int(3_0 * audio_encoder.config.frame_rate )
snake_case = True
snake_case = 3.0
if pytorch_dump_folder is not None:
Path(A ).mkdir(exist_ok=A )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(A )
processor.save_pretrained(A )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(A )
processor.push_to_hub(A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
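# Added usage sketch (hedged): the `_pad` override above right- or left-pads
# `global_attention_mask` with -1, since 0 already means "local attention".
# Assuming the upstream LEDTokenizerFast name and checkpoint:
#
#     from transformers import LEDTokenizerFast
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short", "a noticeably longer input sentence"])
#     enc["global_attention_mask"] = [[1] + [0] * (len(x) - 1) for x in enc["input_ids"]]
#     padded = tok.pad(enc, padding="longest")
#     # the shorter example's global_attention_mask now ends in -1 entries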
| 332 | 0 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCAmelCase_ = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCAmelCase_ = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def __magic_name__ ( A ) -> List[Any]:
snake_case = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=A )[0]
@deprecated(A , 'Please use tf.data to implement this functionality.' )
def __magic_name__ ( A ) -> Optional[int]:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=A ) as bytestream:
snake_case = _readaa(A )
if magic != 2_0_5_1:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
snake_case = _readaa(A )
snake_case = _readaa(A )
snake_case = _readaa(A )
snake_case = bytestream.read(rows * cols * num_images )
snake_case = numpy.frombuffer(A , dtype=numpy.uinta )
snake_case = data.reshape(A , A , A , 1 )
return data
@deprecated(A , 'Please use tf.one_hot on tensors.' )
def __magic_name__ ( A , A ) -> Union[str, Any]:
snake_case = labels_dense.shape[0]
snake_case = numpy.arange(A ) * num_classes
snake_case = numpy.zeros((num_labels, num_classes) )
snake_case = 1
return labels_one_hot
@deprecated(A , 'Please use tf.data to implement this functionality.' )
def __magic_name__ ( A , A=False , A=1_0 ) -> Dict:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=A ) as bytestream:
snake_case = _readaa(A )
if magic != 2_0_4_9:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
snake_case = _readaa(A )
snake_case = bytestream.read(A )
snake_case = numpy.frombuffer(A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(A , A )
return labels
class lowerCamelCase :
@deprecated(
lowercase_, 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.', )
def __init__( self, lowercase_, lowercase_, lowercase_=False, lowercase_=False, lowercase_=dtypes.floataa, lowercase_=True, lowercase_=None, ) -> Dict:
snake_case , snake_case = random_seed.get_seed(lowercase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
snake_case = dtypes.as_dtype(lowercase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
snake_case = 10000
snake_case = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
snake_case = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
snake_case = images.reshape(
images.shape[0], images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
snake_case = images.astype(numpy.floataa )
snake_case = numpy.multiply(lowercase_, 1.0 / 255.0 )
snake_case = images
snake_case = labels
snake_case = 0
snake_case = 0
@property
def _lowerCamelCase ( self ) -> Dict:
return self._images
@property
def _lowerCamelCase ( self ) -> int:
return self._labels
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return self._num_examples
@property
def _lowerCamelCase ( self ) -> List[str]:
return self._epochs_completed
def _lowerCamelCase ( self, lowercase_, lowercase_=False, lowercase_=True ) -> List[str]:
if fake_data:
snake_case = [1] * 784
snake_case = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowercase_ )],
[fake_label for _ in range(lowercase_ )],
)
snake_case = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase_ )
snake_case = self.images[perma]
snake_case = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
snake_case = self._num_examples - start
snake_case = self._images[start : self._num_examples]
snake_case = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase_ )
snake_case = self.images[perm]
snake_case = self.labels[perm]
# Start next epoch
snake_case = 0
snake_case = batch_size - rest_num_examples
snake_case = self._index_in_epoch
snake_case = self._images[start:end]
snake_case = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part), axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part), axis=0 ),
)
else:
self._index_in_epoch += batch_size
snake_case = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(A , 'Please write your own downloading logic.' )
def __magic_name__ ( A , A , A ) -> str:
if not gfile.Exists(A ):
gfile.MakeDirs(A )
snake_case = os.path.join(A , A )
if not gfile.Exists(A ):
urllib.request.urlretrieve(A , A ) # noqa: S310
with gfile.GFile(A ) as f:
snake_case = f.size()
print('Successfully downloaded' , A , A , 'bytes.' )
return filepath
@deprecated(
A , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def __magic_name__ ( A , A=False , A=False , A=dtypes.floataa , A=True , A=5_0_0_0 , A=None , A=DEFAULT_SOURCE_URL , ) -> Tuple:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=A , one_hot=A , dtype=A , seed=A )
snake_case = fake()
snake_case = fake()
snake_case = fake()
return _Datasets(train=A , validation=A , test=A )
if not source_url: # empty string check
snake_case = DEFAULT_SOURCE_URL
snake_case = 'train-images-idx3-ubyte.gz'
snake_case = 'train-labels-idx1-ubyte.gz'
snake_case = 't10k-images-idx3-ubyte.gz'
snake_case = 't10k-labels-idx1-ubyte.gz'
snake_case = _maybe_download(
A , A , source_url + train_images_file )
with gfile.Open(A , 'rb' ) as f:
snake_case = _extract_images(A )
snake_case = _maybe_download(
A , A , source_url + train_labels_file )
with gfile.Open(A , 'rb' ) as f:
snake_case = _extract_labels(A , one_hot=A )
snake_case = _maybe_download(
A , A , source_url + test_images_file )
with gfile.Open(A , 'rb' ) as f:
snake_case = _extract_images(A )
snake_case = _maybe_download(
A , A , source_url + test_labels_file )
with gfile.Open(A , 'rb' ) as f:
snake_case = _extract_labels(A , one_hot=A )
if not 0 <= validation_size <= len(A ):
snake_case = (
'Validation size should be between 0 and '
F'''{len(A )}. Received: {validation_size}.'''
)
raise ValueError(A )
snake_case = train_images[:validation_size]
snake_case = train_labels[:validation_size]
snake_case = train_images[validation_size:]
snake_case = train_labels[validation_size:]
snake_case = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
snake_case = _DataSet(A , A , **A )
snake_case = _DataSet(A , A , **A )
snake_case = _DataSet(A , A , **A )
return _Datasets(train=A , validation=A , test=A )
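# Added usage sketch (hedged): the last helper above corresponds to the
# classic read_data_sets; assuming the upstream names (read_data_sets,
# next_batch) and a writable ./mnist_data cache directory:
#
#     data = read_data_sets("./mnist_data", one_hot=True, validation_size=5000)
#     images, labels = data.train.next_batch(128)
#     print(images.shape, labels.shape)  # (128, 784) (128, 10)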
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A = 3 , A = 7 , A = 1_0_0_0_0_0_0 ) -> int:
snake_case = 0
snake_case = 1
for current_denominator in range(1 , limit + 1 ):
snake_case = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
snake_case = current_numerator
snake_case = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert '__pycache__' not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert '__pycache__' not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    'path, config_name, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    'path, expected',
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config',
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 332 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]

    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
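

# Hedged usage sketch: compiling the extension requires a CUDA toolchain at
# runtime, so callers typically build it lazily and guard against failure:
#   try:
#       MultiScaleDeformableAttention = load_cuda_kernels()
#   except Exception:
#       MultiScaleDeformableAttention = None  # fall back to the pure-PyTorch path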
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter Video/IGTV url: ").strip()
lowerCAmelCase_ = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        snake_case = Transformer2DModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
snake_case_ = ShapEPipeline
snake_case_ = ['''prompt''']
snake_case_ = ['''prompt''']
snake_case_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def _lowerCamelCase ( self ) -> List[str]:
return 32
@property
def _lowerCamelCase ( self ) -> List[str]:
return 32
@property
def _lowerCamelCase ( self ) -> int:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self ) -> List[str]:
return 8
@property
def _lowerCamelCase ( self ) -> Dict:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(lowercase_ )
@property
def _lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
snake_case = PriorTransformer(**lowercase_ )
return model
@property
def _lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
snake_case = ShapERenderer(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Any:
snake_case = self.dummy_prior
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_renderer
snake_case = HeunDiscreteScheduler(
beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=lowercase_, clip_sample=lowercase_, clip_sample_range=1.0, )
snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
def _lowerCamelCase ( self ) -> str:
snake_case = 'cpu'
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**lowercase_ )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = pipe(**self.get_dummy_inputs(lowercase_ ) )
snake_case = output.images[0]
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
snake_case = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> int:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = torch_device == 'cpu'
snake_case = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowercase_, relax_max_difference=lowercase_, )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**lowercase_ )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 1
snake_case = 2
snake_case = self.get_dummy_inputs(lowercase_ )
for key in inputs.keys():
if key in self.batch_params:
snake_case = batch_size * [inputs[key]]
snake_case = pipe(**lowercase_, num_images_per_prompt=lowercase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
'a shark', generator=lowercase_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase_, lowercase_ )
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ['note_seq']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])
| 332 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = 'sew-d'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
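

# Sanity check for `inputs_to_logits_ratio` above: with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature encoder consumes
# 5 * 2**6 = 320 input samples per output frame:
#   functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320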
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 332 | 0 |
'''simple docstring'''
lowerCAmelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Linear Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = 'sew'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ''
    protocol = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo='', target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode='rb', protocol=target_protocol, compression=self.compression, client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}))
        self.compressed_name = os.path.basename(self.file.path.split('::')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.')]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip('/')

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}

    def cat(self, path):
        return self.file.open().read()

    def _open(self, path, mode='rb', block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != 'rb':
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'

    def __init__(self, fo='', mode='rb', target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
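

# Hedged usage sketch (fsspec URL chaining is standard fsspec syntax; the local
# path is hypothetical): once registered under their `protocol` names, these
# filesystems let a compressed file be read transparently:
#   import fsspec
#   with fsspec.open('gzip://file.txt::/tmp/archive/file.txt.gz', 'rb') as f:
#       data = f.read()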
| 332 | 0 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1))

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18)
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(key_text)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 350 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of `nums` between inclusive indices `left` and `right`
    using divide and conquer."""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
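    # Illustrative example, assuming the reconstructed inclusive-index
    # signature above: the maximum of the whole list is found by passing
    # the first and last indices.
    assert find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9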
| 332 | 0 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and 'global_attention_mask' in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])

                if self.padding_side == 'right':
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == 'left':
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return encoded_inputs
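

# Note on the -1 padding in `_pad` above: in `global_attention_mask`, 0 marks
# local attention and 1 marks global attention, so padded positions use -1 to
# stay distinguishable from both real values.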
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method: the encoded latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18_215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
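

# Minimal round-trip sketch (hedged: a deliberately tiny configuration, shapes
# chosen only for illustration):
#   import torch
#   model = VQModel(block_out_channels=(32,), norm_num_groups=32, sample_size=32)
#   x = torch.randn(1, 3, 32, 32)
#   reconstruction = model(x).sample  # same spatial shape as the input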
| 332 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
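    # Expected behaviour of the demo: the reversed list 10..1 comes back sorted
    # ascending, i.e. odd_even_transposition(list(range(10, 0, -1))) returns
    # list(range(1, 11)). Note the hardcoded bound of 10 swap rounds in
    # oe_process matches this list length; longer inputs need a matching bound.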
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Find the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = tempfile.mkdtemp()
snake_case = BlipImageProcessor()
snake_case = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
snake_case = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
snake_case = InstructBlipProcessor(lowercase_, lowercase_, lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).tokenizer
def _lowerCamelCase ( self, **lowercase_ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).image_processor
def _lowerCamelCase ( self, **lowercase_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).qformer_tokenizer
def _lowerCamelCase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> Tuple:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = InstructBlipProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
snake_case = self.get_image_processor(do_normalize=lowercase_, padding_value=1.0 )
snake_case = InstructBlipProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowercase_, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowercase_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowercase_ )
self.assertIsInstance(processor.qformer_tokenizer, lowercase_ )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = self.get_qformer_tokenizer()
snake_case = InstructBlipProcessor(
tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(lowercase_, return_tensors='np' )
snake_case = processor(images=lowercase_, return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = self.get_qformer_tokenizer()
snake_case = InstructBlipProcessor(
tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ )
snake_case = 'lower newer'
snake_case = processor(text=lowercase_ )
snake_case = tokenizer(lowercase_, return_token_type_ids=lowercase_ )
snake_case = qformer_tokenizer(lowercase_, return_token_type_ids=lowercase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key], encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key] )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = self.get_qformer_tokenizer()
snake_case = InstructBlipProcessor(
tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ )
snake_case = 'lower newer'
snake_case = self.prepare_image_inputs()
snake_case = processor(text=lowercase_, images=lowercase_ )
self.assertListEqual(
list(inputs.keys() ), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def _lowerCamelCase ( self ) -> Any:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = self.get_qformer_tokenizer()
snake_case = InstructBlipProcessor(
tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ )
snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case = processor.batch_decode(lowercase_ )
snake_case = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = self.get_qformer_tokenizer()
snake_case = InstructBlipProcessor(
tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ )
snake_case = 'lower newer'
snake_case = self.prepare_image_inputs()
snake_case = processor(text=lowercase_, images=lowercase_ )
self.assertListEqual(
list(inputs.keys() ), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={"_".join([str(lowercase_ ) for s in shape] )}.npy'''
def _lowerCamelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCamelCase ( self, lowercase_=0, lowercase_=(4, 4, 64, 64), lowercase_=False ) -> Tuple:
snake_case = jnp.bfloataa if fpaa else jnp.floataa
snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase_, lowercase_ ) ), dtype=lowercase_ )
return image
def _lowerCamelCase ( self, lowercase_=False, lowercase_="CompVis/stable-diffusion-v1-4" ) -> int:
snake_case = jnp.bfloataa if fpaa else jnp.floataa
snake_case = 'bf16' if fpaa else None
snake_case , snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase_, subfolder='unet', dtype=lowercase_, revision=lowercase_ )
return model, params
def _lowerCamelCase ( self, lowercase_=0, lowercase_=(4, 77, 768), lowercase_=False ) -> Optional[Any]:
snake_case = jnp.bfloataa if fpaa else jnp.floataa
snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase_, lowercase_ ) ), dtype=lowercase_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Optional[Any]:
snake_case , snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fpaa=lowercase_ )
snake_case = self.get_latents(lowercase_, fpaa=lowercase_ )
snake_case = self.get_encoder_hidden_states(lowercase_, fpaa=lowercase_ )
snake_case = model.apply(
{'params': params}, lowercase_, jnp.array(lowercase_, dtype=jnp.intaa ), encoder_hidden_states=lowercase_, ).sample
assert sample.shape == latents.shape
snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
snake_case = jnp.array(lowercase_, dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase_, lowercase_, atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Tuple:
snake_case , snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fpaa=lowercase_ )
snake_case = self.get_latents(lowercase_, shape=(4, 4, 96, 96), fpaa=lowercase_ )
snake_case = self.get_encoder_hidden_states(lowercase_, shape=(4, 77, 1024), fpaa=lowercase_ )
snake_case = model.apply(
{'params': params}, lowercase_, jnp.array(lowercase_, dtype=jnp.intaa ), encoder_hidden_states=lowercase_, ).sample
assert sample.shape == latents.shape
snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
snake_case = jnp.array(lowercase_, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase_, lowercase_, atol=1E-2 )
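# Reading the parameterized cases above: each row is [seed, timestep,
# expected_slice]. The seed selects the stored latents / hidden-states arrays,
# the timestep is wrapped in jnp.array(...) before being passed to the model,
# and the slice sample[-1, -2:, -2:, :2] is compared against the expected
# values with atol=1e-2.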
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
        default=20 , metadata={'''help''': '''The total number of n-best predictions to generate in the nbest_predictions.json output file.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ' a future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
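# A minimal usage sketch for the dataset above; the public names
# SquadDataTrainingArguments / SquadDataset follow the upstream transformers API
# and are assumptions here:
#
#   args = SquadDataTrainingArguments(data_dir="./squad", max_seq_length=384)
#   train_set = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_set[0]  # dict with input_ids / attention_mask / token_type_ids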
| 332 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
snake_case_ = '''maskformer-swin'''
snake_case_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self, lowercase_=224, lowercase_=4, lowercase_=3, lowercase_=96, lowercase_=[2, 2, 6, 2], lowercase_=[3, 6, 12, 24], lowercase_=7, lowercase_=4.0, lowercase_=True, lowercase_=0.0, lowercase_=0.0, lowercase_=0.1, lowercase_="gelu", lowercase_=False, lowercase_=0.02, lowercase_=1E-5, lowercase_=None, lowercase_=None, **lowercase_, ) -> Any:
super().__init__(**lowercase_ )
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = embed_dim
snake_case = depths
snake_case = len(lowercase_ )
snake_case = num_heads
snake_case = window_size
snake_case = mlp_ratio
snake_case = qkv_bias
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = use_absolute_embeddings
snake_case = layer_norm_eps
snake_case = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
snake_case = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(lowercase_ ) + 1 )]
snake_case , snake_case = get_aligned_output_features_output_indices(
out_features=lowercase_, out_indices=lowercase_, stage_names=self.stage_names )
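# A minimal instantiation sketch (the upstream class name MaskFormerSwinConfig is
# an assumption; values mirror the defaults above):
#
#   config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2],
#                                 num_heads=[3, 6, 12, 24])
#   # hidden_size is derived as embed_dim * 2 ** (len(depths) - 1) == 768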
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
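# Example invocation (script and file names are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin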
| 332 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
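# A minimal usage sketch for the exported pipeline (the checkpoint id
# "openai/shap-e" is an assumption, not guaranteed by this file):
#
#   import torch
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images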
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
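# Worked example for the first assertion: min=1, max=5 gives 5 buckets; each value
# i lands in buckets[int(i - 1)], producing [[1], [2], [3], [4], [5]], and the
# final per-bucket sort + flatten yields [1, 2, 3, 4, 5].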
| 332 | 0 |
'''simple docstring'''
from collections.abc import Callable
def __magic_name__ ( A , A , A ) -> float:
snake_case = a
snake_case = b
if function(A ) == 0: # one of the a or b is a root for the function
return a
elif function(A ) == 0:
return b
elif (
function(A ) * function(A ) > 0
    ): # if neither endpoint is a root and the function values at a and b share the same sign,
        # then this algorithm can't find a root in the interval
raise ValueError('could not find root in given interval.' )
else:
snake_case = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # until the half-interval |start - mid| shrinks below 10^-7
if function(A ) == 0:
return mid
elif function(A ) * function(A ) < 0:
snake_case = mid
else:
snake_case = mid
snake_case = start + (end - start) / 2.0
return mid
def __magic_name__ ( A ) -> float:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
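# Worked example: with f(x) = x**3 - 2*x - 5, f(1) = -6 < 0 and f(1000) > 0, so
# [1, 1000] brackets a sign change and the loop above converges to the real root
# x ~= 2.0945515.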
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
    # Bolzano's theorem: a sign change of the function between a and b is required for a root
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
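# Both calls bracket the positive root of 10 - x**2: equation(-2) = 6 and
# equation(5) = -15; equation(0) = 10 and equation(6) = -26. Each run therefore
# converges (within the 0.01 tolerance) to x ~= sqrt(10) ~= 3.162.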
| 332 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
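# The placeholder above fails fast and with a clear message: any attempt to
# construct or load the class without note_seq installed raises an ImportError
# from requires_backends instead of an obscure AttributeError later (sketch;
# exact message text varies by version).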
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
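# Together these fixtures materialize __dummy_dataset1__.py under tmp_path/datasets
# so a test can load it; a minimal consumer sketch (the fixture name
# dataset_loading_script_dir follows the upstream convention and is an assumption):
#
#   def test_load_dummy_dataset(dataset_loading_script_dir):
#       from datasets import load_dataset
#       ds = load_dataset(dataset_loading_script_dir, split="train")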
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = None
snake_case_ = None
def __magic_name__ ( ) -> Node | None:
snake_case = Node(1 )
snake_case = Node(2 )
snake_case = Node(3 )
snake_case = Node(4 )
snake_case = Node(5 )
return tree
def __magic_name__ ( A ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( A ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( A ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( A ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __magic_name__ ( A ) -> Sequence[Node | None]:
snake_case = []
if root is None:
return output
snake_case = deque([root] )
while process_queue:
snake_case = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( A , A ) -> Sequence[Node | None]:
snake_case = []
def populate_output(A , A ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A , A )
return output
def __magic_name__ ( A , A ) -> Sequence[Node | None]:
snake_case = []
def populate_output(A , A ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A , A )
return output
def __magic_name__ ( A ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
snake_case = []
snake_case = 0
snake_case = height(A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A , A ) )
snake_case = 1
else:
output.append(get_nodes_from_right_to_left(A , A ) )
snake_case = 0
return output
def __magic_name__ ( ) -> None: # Main function for testing.
snake_case = make_tree()
print(F'''In-order Traversal: {inorder(A )}''' )
print(F'''Pre-order Traversal: {preorder(A )}''' )
print(F'''Post-order Traversal: {postorder(A )}''' , '\n' )
print(F'''Height of Tree: {height(A )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(A ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(A ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(A , level=A ) )
print('\nZigZag order Traversal: ' )
print(zigzag(A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
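# Expected results, assuming the canonical wiring make_tree intends (1 at the
# root, 2 and 3 as its children, 4 and 5 under 2):
#   in-order   [4, 2, 5, 1, 3]    pre-order [1, 2, 4, 5, 3]
#   post-order [4, 5, 2, 3, 1]    height 3, level order [1, 2, 3, 4, 5]
#   zigzag     [[1], [3, 2], [4, 5]]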
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
    # we perform n swap phases (n == 10, the length of the list in main) since
    # after n phases the list is guaranteed to be sorted; we *could* stop early,
    # but detecting that we are sorted takes as long as finishing the sort
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
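# For reference, a single-process sketch of the same odd-even transposition idea:
# n alternating-parity passes over the list guarantee a sorted result.
def odd_even_sort_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for j in range(phase % 2, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr

assert odd_even_sort_sequential(list(range(1_0, 0, -1))) == list(range(1, 1_1))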
| 332 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''Wav2Vec2FeatureExtractor'''
snake_case_ = '''AutoTokenizer'''
def __init__( self, lowercase_, lowercase_ ) -> str:
super().__init__(lowercase_, lowercase_ )
snake_case = self.feature_extractor
snake_case = False
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> Optional[Any]:
try:
return super().from_pretrained(lowercase_, **lowercase_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ', lowercase_, )
snake_case = WavaVecaFeatureExtractor.from_pretrained(lowercase_, **lowercase_ )
snake_case = WavaVecaCTCTokenizer.from_pretrained(lowercase_, **lowercase_ )
return cls(feature_extractor=lowercase_, tokenizer=lowercase_ )
def __call__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase_, **lowercase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
snake_case = kwargs.pop('raw_speech' )
else:
snake_case = kwargs.pop('audio', lowercase_ )
snake_case = kwargs.pop('sampling_rate', lowercase_ )
snake_case = kwargs.pop('text', lowercase_ )
if len(lowercase_ ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
snake_case = self.feature_extractor(lowercase_, *lowercase_, sampling_rate=lowercase_, **lowercase_ )
if text is not None:
snake_case = self.tokenizer(lowercase_, **lowercase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings['input_ids']
return inputs
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowercase_, **lowercase_ )
snake_case = kwargs.pop('input_features', lowercase_ )
snake_case = kwargs.pop('labels', lowercase_ )
if len(lowercase_ ) > 0:
snake_case = args[0]
snake_case = args[1:]
if input_features is not None:
snake_case = self.feature_extractor.pad(lowercase_, *lowercase_, **lowercase_ )
if labels is not None:
snake_case = self.tokenizer.pad(lowercase_, **lowercase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
snake_case = labels['input_ids']
return input_features
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase_, **lowercase_ )
@contextmanager
def _lowerCamelCase ( self ) -> Union[str, Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
snake_case = True
snake_case = self.tokenizer
yield
snake_case = self.feature_extractor
snake_case = False
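# A minimal usage sketch; the upstream names Wav2Vec2Processor and
# "facebook/wav2vec2-base-960h" are assumptions, not defined in this file:
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids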
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
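# For comparison, the standard library yields the same 4! = 24 orderings:
from itertools import permutations

for p in permutations([3, 1, 2, 4]):
    print(list(p))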
| 332 | 0 |