code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
if (ksize % 2) == 0:
A_ = ksize + 1
A_ = np.zeros((ksize, ksize) ,dtype=np.floataa )
# each value
for y in range(A__ ):
for x in range(A__ ):
# distance from center
A_ = x - ksize // 2
A_ = y - ksize // 2
# degree to radiant
A_ = theta / 180 * np.pi
A_ = np.cos(_theta )
A_ = np.sin(_theta )
# get kernel x
A_ = cos_theta * px + sin_theta * py
# get kernel y
A_ = -sin_theta * px + cos_theta * py
# fill kernel
A_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__a :int = imread('../image_data/lena.jpg')
# turn image in gray scale value
__a :str = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__a :Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__a :Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__a :Dict = out / out.max() * 255
__a :Any = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ):
A_ = 10
def __A ( self : Tuple ):
A_ = [1, 2, 3, 4]
A_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def __A ( self : int ):
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def __A ( self : int ):
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
A_ , A_ = process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
def __A ( self : Optional[Any] ):
A_ = ""
A_ , A_ = process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
self.assertEqual(__lowerCAmelCase , [] )
def __A ( self : Optional[Any] ):
A_ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
A_ , A_ = process_story(__lowerCAmelCase )
A_ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
A_ = ["It was the best of times."]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self : Any ):
A_ = torch.tensor([1, 2, 3, 4] )
A_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def __A ( self : List[Any] ):
A_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def __A ( self : Any ):
A_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def __A ( self : Optional[Any] ):
A_ = 101
A_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ = compute_token_type_ids(__lowerCAmelCase , __lowerCAmelCase )
np.testing.assert_array_equal(__lowerCAmelCase , __lowerCAmelCase )
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from __future__ import annotations
__a :str = "#"
class _a :
"""simple docstring"""
def __init__( self : Tuple ):
A_ = {}
def __A ( self : Tuple , UpperCAmelCase : str ):
A_ = self._trie
for char in text:
if char not in trie:
A_ = {}
A_ = trie[char]
A_ = True
def __A ( self : Optional[Any] , UpperCAmelCase : str ):
A_ = self._trie
for char in prefix:
if char in trie:
A_ = trie[char]
else:
return []
return self._elements(a_ )
def __A ( self : int , UpperCAmelCase : dict ):
A_ = []
for c, v in d.items():
A_ = [" "] if c == END else [(c + s) for s in self._elements(a_ )]
result.extend(a_ )
return tuple(a_ )
__a :Tuple = Trie()
__a :Any = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def __snake_case ( __UpperCamelCase : str ):
A_ = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
def __snake_case ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : List[str] ):
A_ = fn
def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
return self.fn(*UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( self : Dict , UpperCAmelCase : List[str] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a :Optional[Any] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__a :int = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if "resnet-50" in model_name:
A_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
A_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
A_ = DetrConfig(use_timm_backbone=a__ ,backbone_config=a__ )
# set label attributes
A_ = "panoptic" in model_name
if is_panoptic:
A_ = 250
else:
A_ = 91
A_ = "huggingface/label-files"
A_ = "coco-detection-id2label.json"
A_ = json.load(open(hf_hub_download(a__ ,a__ ,repo_type="dataset" ) ,"r" ) )
A_ = {int(a__ ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = state_dict.pop(a__ )
A_ = val
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any]=False ):
"""simple docstring"""
A_ = ""
if is_panoptic:
A_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
A_ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[:256, :]
A_ = in_proj_bias[:256]
A_ = in_proj_weight[256:512, :]
A_ = in_proj_bias[256:512]
A_ = in_proj_weight[-256:, :]
A_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
A_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[:256, :]
A_ = in_proj_bias[:256]
A_ = in_proj_weight[256:512, :]
A_ = in_proj_bias[256:512]
A_ = in_proj_weight[-256:, :]
A_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
A_ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
A_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ = in_proj_weight_cross_attn[:256, :]
A_ = in_proj_bias_cross_attn[:256]
A_ = in_proj_weight_cross_attn[256:512, :]
A_ = in_proj_bias_cross_attn[256:512]
A_ = in_proj_weight_cross_attn[-256:, :]
A_ = in_proj_bias_cross_attn[-256:]
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(a__ ,stream=a__ ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict=None ,__UpperCamelCase : Any=False ):
"""simple docstring"""
A_ , A_ = get_detr_config(a__ )
# load original model from torch hub
A_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f'''Converting model {model_name}...''' )
A_ = torch.hub.load("facebookresearch/detr" ,model_name_to_original_name[model_name] ,pretrained=a__ ).eval()
A_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(a__ ):
if is_panoptic:
A_ = "detr." + src
rename_key(a__ ,a__ ,a__ )
# query, key and value matrices need special treatment
read_in_q_k_v(a__ ,is_panoptic=a__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
A_ = state_dict.pop(a__ )
A_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ = state_dict.pop(a__ )
A_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
A_ = state_dict.pop(a__ )
A_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
A_ = state_dict.pop(a__ )
A_ = val
# finally, create HuggingFace model and load state dict
A_ = DetrForSegmentation(a__ ) if is_panoptic else DetrForObjectDetection(a__ )
model.load_state_dict(a__ )
model.eval()
# verify our conversion on an image
A_ = "coco_panoptic" if is_panoptic else "coco_detection"
A_ = DetrImageProcessor(format=a__ )
A_ = processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = detr(a__ )
A_ = model(a__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-3 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
processor.save_pretrained(a__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__a :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
__a :Optional[int] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : str ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = LxmertConfig.from_json_file(lowercase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
A_ = LxmertForPreTraining(lowercase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowercase_ ,lowercase_ ,lowercase_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,lowercase_ )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
def __snake_case ( __UpperCamelCase : list[int] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(UpperCamelCase__ ) )
def __snake_case ( __UpperCamelCase : list[list[int]] ,__UpperCamelCase : int ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ):
"""simple docstring"""
if index == len(UpperCamelCase__ ):
return True
# Recursive Step
for i in range(UpperCamelCase__ ):
if valid_coloring(graph[index] ,UpperCamelCase__ ,UpperCamelCase__ ):
# Color current vertex
A_ = i
# Validate coloring
if util_color(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,index + 1 ):
return True
# Backtrack
A_ = -1
return False
def __snake_case ( __UpperCamelCase : list[list[int]] ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = [-1] * len(UpperCamelCase__ )
if util_color(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,0 ):
return colored_vertices
return []
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[int]=False ):
"""simple docstring"""
A_ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_a ,_a )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = dct.pop(_a )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(_a ,stream=_a ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = BitConfig(
global_padding="same" ,layer_type="bottleneck" ,depths=(3, 4, 9) ,out_features=["stage3"] ,embedding_dynamic_padding=_a ,)
A_ = ViTHybridConfig(backbone_config=_a ,image_size=384 ,num_labels=1000 )
A_ = False
# load original model from timm
A_ = timm.create_model(_a ,pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
A_ = create_rename_keys(_a ,_a )
for src, dest in rename_keys:
rename_key(_a ,_a ,_a )
read_in_q_k_v(_a ,_a ,_a )
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(_a ,_a ,repo_type="dataset" ) ,"r" ) )
A_ = {int(_a ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTHybridModel(_a ).eval()
else:
A_ = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
A_ = create_transform(**resolve_data_config({} ,model=_a ) )
A_ = transform.transforms
A_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
A_ = ViTHybridImageProcessor(
do_resize=_a ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=_a ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=_a ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
A_ = prepare_img()
A_ = transform(_a ).unsqueeze(0 )
A_ = processor(_a ,return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_a ,_a )
# verify logits
with torch.no_grad():
A_ = model(_a )
A_ = outputs.logits
print("Predicted class:" ,logits.argmax(-1 ).item() )
if base_model:
A_ = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a ,outputs.logits ,atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_a )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_a )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
__a :Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = len(_UpperCAmelCase )
for i in range(1 ,_UpperCAmelCase ):
A_ = collection[i]
A_ = 0
A_ = i - 1
while low <= high:
A_ = (low + high) // 2
if val < collection[mid]:
A_ = mid - 1
else:
A_ = mid + 1
for j in range(_UpperCAmelCase ,_UpperCAmelCase ,-1 ):
A_ = collection[j - 1]
A_ = val
return collection
if __name__ == "__main__":
__a :Any = input('Enter numbers separated by a comma:\n').strip()
__a :Optional[Any] = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__a :Dict = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__a :List[Any] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__a :int = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def __A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]="auto" , UpperCAmelCase : Any=-1 , UpperCAmelCase : Union[str, Any]=0.9 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : Optional[int]=500 , UpperCAmelCase : List[str]="gpt2-large" , UpperCAmelCase : Union[str, Any]=-1 , UpperCAmelCase : List[Any]=1024 , UpperCAmelCase : str=25 , UpperCAmelCase : Union[str, Any]=5 , UpperCAmelCase : str=True , UpperCAmelCase : Any=25 , ):
A_ = compute_mauve(
p_text=lowerCAmelCase__ , q_text=lowerCAmelCase__ , p_features=lowerCAmelCase__ , q_features=lowerCAmelCase__ , p_tokens=lowerCAmelCase__ , q_tokens=lowerCAmelCase__ , num_buckets=lowerCAmelCase__ , pca_max_data=lowerCAmelCase__ , kmeans_explained_var=lowerCAmelCase__ , kmeans_num_redo=lowerCAmelCase__ , kmeans_max_iter=lowerCAmelCase__ , featurize_model_name=lowerCAmelCase__ , device_id=lowerCAmelCase__ , max_text_length=lowerCAmelCase__ , divergence_curve_discretization_size=lowerCAmelCase__ , mauve_scaling_factor=lowerCAmelCase__ , verbose=lowerCAmelCase__ , seed=lowerCAmelCase__ , )
return out
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__a :Union[str, Any] = False
class _a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Any ):
A_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A_ = "A painting of a squirrel eating a burger "
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
A_ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A_ = generator.manual_seed(0 )
A_ = pipe(
prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
A_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A_ = "A painting of a squirrel eating a burger "
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
A_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__a :Union[str, Any] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__a :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "tf_padding" ) )
self.parent.assertTrue(hasattr(_a , "depth_multiplier" ) )
class _a :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Any=3 , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[Any]=0.25 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : str="relu6" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=10 , UpperCAmelCase : Optional[Any]=None , ):
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = depth_multiplier
A_ = min_depth
A_ = tf_padding
A_ = int(last_hidden_size * depth_multiplier )
A_ = output_stride
A_ = hidden_act
A_ = classifier_dropout_prob
A_ = use_labels
A_ = is_training
A_ = num_labels
A_ = initializer_range
A_ = scope
def __A ( self : int ):
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.num_labels )
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Optional[Any] ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int ):
A_ = MobileNetVaModel(config=_a )
model.to(_a )
model.eval()
A_ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ):
A_ = self.num_labels
A_ = MobileNetVaForImageClassification(_a )
model.to(_a )
model.eval()
A_ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Optional[int] ):
A_ = self.prepare_config_and_inputs()
A_ = config_and_inputs
A_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
_lowerCamelCase : List[str] = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
def __A ( self : str ):
A_ = MobileNetVaModelTester(self )
A_ = MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __A ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def __A ( self : List[str] ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def __A ( self : Optional[Any] ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def __A ( self : str ):
pass
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_a )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def __A ( self : Union[str, Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __A ( self : Tuple ):
def check_hidden_states_output(UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ):
A_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_a , _a ) )
A_ = outputs.hidden_states
A_ = 26
self.assertEqual(len(_a ) , _a )
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(_a , _a , _a )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __A ( self : Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = MobileNetVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __snake_case ( ):
"""simple docstring"""
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def __A ( self : Optional[Any] ):
A_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_a )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
A_ = model(**_a )
# verify the logits
A_ = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _a )
A_ = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 329 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( _a , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Tuple = LDMTextToImagePipeline
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {
'negative_prompt',
'negative_prompt_embeds',
'cross_attention_kwargs',
'prompt_embeds',
}
_lowerCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'callback',
'callback_steps',
}
_lowerCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : List[str] = False
def __A ( self : int ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(_a )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any]=0 ):
if str(_a ).startswith("mps" ):
A_ = torch.manual_seed(_a )
else:
A_ = torch.Generator(device=_a ).manual_seed(_a )
A_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : Dict ):
A_ = "cpu" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = LDMTextToImagePipeline(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ = self.get_dummy_inputs(_a )
A_ = pipe(**_a ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A_ = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Any=torch.floataa , UpperCAmelCase : List[str]=0 ):
A_ = torch.manual_seed(_a )
A_ = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) )
A_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
A_ = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : Any ):
A_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ = self.get_inputs(_a )
A_ = pipe(**_a ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A_ = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] )
A_ = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any]=torch.floataa , UpperCAmelCase : Tuple=0 ):
A_ = torch.manual_seed(_a )
A_ = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) )
A_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
A_ = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : int ):
A_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ = self.get_inputs(_a )
A_ = pipe(**_a ).images[0]
A_ = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
A_ = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__a :Optional[Any] = 'http://www.mocksite.com/file1.txt'
__a :int = '\"text\": [\"foo\", \"foo\"]'
__a :Dict = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class _a :
"""simple docstring"""
_lowerCamelCase : Optional[int] = 2_0_0
_lowerCamelCase : Any = {'Content-Length': '100'}
_lowerCamelCase : Union[str, Any] = {}
def __A ( self : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
return [bytes(snake_case_ , "utf-8" )]
def __snake_case ( *__UpperCamelCase : int ,**__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("urls_type" ,[str, list, dict] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCAmelCase ,"request" ,_lowerCAmelCase )
A_ = URL
if issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = url
elif issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = [url]
elif issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = {"""train""": url}
A_ = """dummy"""
A_ = """downloads"""
A_ = tmp_path
A_ = DownloadConfig(
cache_dir=os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) ,use_etag=_lowerCAmelCase ,)
A_ = DownloadManager(dataset_name=_lowerCAmelCase ,download_config=_lowerCAmelCase )
A_ = dl_manager.download(_lowerCAmelCase )
A_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = [downloaded_paths]
A_ = [urls]
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
A_ = downloaded_paths.values()
A_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCAmelCase ,_lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A_ = Path(_lowerCAmelCase )
A_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A_ = downloaded_path.read_text()
assert content == CONTENT
A_ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
A_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" ,[str, list, dict] )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = str(_lowerCAmelCase )
if issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = filename
elif issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = [filename]
elif issubclass(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = {"""train""": filename}
A_ = """dummy"""
A_ = xz_file.parent
A_ = """extracted"""
A_ = DownloadConfig(
cache_dir=_lowerCAmelCase ,use_etag=_lowerCAmelCase ,)
A_ = DownloadManager(dataset_name=_lowerCAmelCase ,download_config=_lowerCAmelCase )
A_ = dl_manager.extract(_lowerCAmelCase )
A_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
A_ = [extracted_paths]
A_ = [paths]
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
assert "train" in extracted_paths.keys()
A_ = extracted_paths.values()
A_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCAmelCase ,_lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
A_ = Path(_lowerCAmelCase )
A_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCAmelCase ,etag=_lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A_ = extracted_path.read_text()
A_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ):
"""simple docstring"""
assert path.endswith(".jsonl" )
for num_items, line in enumerate(_lowerCAmelCase ,start=1 ):
A_ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" ,["tar_jsonl_path", "zip_jsonl_path"] )
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = request.getfixturevalue(_lowerCAmelCase )
A_ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) ,start=1 ):
_test_jsonl(_lowerCAmelCase ,_lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" ,["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = request.getfixturevalue(_lowerCAmelCase )
A_ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCAmelCase ) ,start=1 ):
_test_jsonl(_lowerCAmelCase ,_lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCAmelCase ) ,start=1 ):
assert os.path.basename(_lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a :Optional[Any] = logging.get_logger(__name__)
# General docstring
__a :Optional[Any] = 'RegNetConfig'
# Base docstring
__a :List[str] = 'facebook/regnet-y-040'
__a :Tuple = [1, 1088, 7, 7]
# Image classification docstring
__a :Optional[Any] = 'facebook/regnet-y-040'
__a :int = 'tabby, tabby cat'
__a :Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[str] = "relu" , **UpperCAmelCase : Tuple , ):
super().__init__(**__lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ = tf.keras.layers.ConvaD(
filters=__lowercase , kernel_size=__lowercase , strides=__lowercase , padding="VALID" , groups=__lowercase , use_bias=__lowercase , name="convolution" , )
A_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
A_ = ACTaFN[activation] if activation is not None else tf.identity
def __A ( self : Dict , UpperCAmelCase : Optional[Any] ):
A_ = self.convolution(self.padding(__lowercase ) )
A_ = self.normalization(__lowercase )
A_ = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : RegNetConfig , **UpperCAmelCase : Optional[Any] ):
super().__init__(**__lowercase )
A_ = config.num_channels
A_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __A ( self : Tuple , UpperCAmelCase : Any ):
A_ = shape_list(__lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ = tf.transpose(__lowercase , perm=(0, 2, 3, 1) )
A_ = self.embedder(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : int , UpperCAmelCase : int = 2 , **UpperCAmelCase : Optional[Any] ):
super().__init__(**__lowercase )
A_ = tf.keras.layers.ConvaD(
filters=__lowercase , kernel_size=1 , strides=__lowercase , use_bias=__lowercase , name="convolution" )
A_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __A ( self : Union[str, Any] , UpperCAmelCase : tf.Tensor , UpperCAmelCase : bool = False ):
return self.normalization(self.convolution(__lowercase ) , training=__lowercase )
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : int , UpperCAmelCase : int , **UpperCAmelCase : int ):
super().__init__(**__lowercase )
A_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name="pooler" )
A_ = [
tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __A ( self : List[Any] , UpperCAmelCase : Optional[int] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
A_ = self.pooler(__lowercase )
for layer_module in self.attention:
A_ = layer_module(__lowercase )
A_ = hidden_state * pooled
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , **UpperCAmelCase : int ):
super().__init__(**__lowercase )
A_ = in_channels != out_channels or stride != 1
A_ = max(1 , out_channels // config.groups_width )
A_ = (
TFRegNetShortCut(__lowercase , stride=__lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ = [
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name="layer.2" ),
]
A_ = ACTaFN[config.hidden_act]
def __A ( self : List[str] , UpperCAmelCase : Any ):
A_ = hidden_state
for layer_module in self.layers:
A_ = layer_module(__lowercase )
A_ = self.shortcut(__lowercase )
hidden_state += residual
A_ = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , **UpperCAmelCase : List[str] ):
super().__init__(**__lowercase )
A_ = in_channels != out_channels or stride != 1
A_ = max(1 , out_channels // config.groups_width )
A_ = (
TFRegNetShortCut(__lowercase , stride=__lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
A_ = [
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name="layer.3" ),
]
A_ = ACTaFN[config.hidden_act]
def __A ( self : int , UpperCAmelCase : List[Any] ):
A_ = hidden_state
for layer_module in self.layers:
A_ = layer_module(__lowercase )
A_ = self.shortcut(__lowercase )
hidden_state += residual
A_ = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , **UpperCAmelCase : Any ):
super().__init__(**__lowercase )
A_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
A_ = [
# downsampling is done in the first layer with stride of 2
layer(__lowercase , __lowercase , __lowercase , stride=__lowercase , name="layers.0" ),
*[layer(__lowercase , __lowercase , __lowercase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def __A ( self : str , UpperCAmelCase : Dict ):
for layer_module in self.layers:
A_ = layer_module(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : RegNetConfig , **UpperCAmelCase : List[Any] ):
super().__init__(**__lowercase )
A_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
A_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowercase , __lowercase , __lowercase , depth=__lowercase , name=f'''stages.{i+1}''' ) )
def __A ( self : str , UpperCAmelCase : tf.Tensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ):
A_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ = hidden_states + (hidden_state,)
A_ = stage_module(__lowercase )
if output_hidden_states:
A_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase , hidden_states=__lowercase )
@keras_serializable
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
_lowerCamelCase : Dict = RegNetConfig
def __init__( self : List[Any] , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ):
super().__init__(**__lowercase )
A_ = config
A_ = TFRegNetEmbeddings(__lowercase , name="embedder" )
A_ = TFRegNetEncoder(__lowercase , name="encoder" )
A_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name="pooler" )
@unpack_inputs
def __A ( self : Union[str, Any] , UpperCAmelCase : tf.Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , ):
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = self.embedder(__lowercase , training=__lowercase )
A_ = self.encoder(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase )
A_ = encoder_outputs[0]
A_ = self.pooler(__lowercase )
# Change to NCHW output format have uniformity in the modules
A_ = tf.transpose(__lowercase , perm=(0, 3, 1, 2) )
A_ = tf.transpose(__lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ = tuple([tf.transpose(__lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _a ( __lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = RegNetConfig
_lowerCamelCase : List[str] = """regnet"""
_lowerCamelCase : Any = """pixel_values"""
@property
def __A ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__a :Optional[Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a :Optional[int] = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCamelCase , )
class _a ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : RegNetConfig , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
super().__init__(__lowercase , *__lowercase , **__lowercase )
A_ = TFRegNetMainLayer(__lowercase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self : Optional[Any] , UpperCAmelCase : tf.Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Tuple=False , ):
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = self.regnet(
pixel_values=__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowerCamelCase , )
class _a ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : RegNetConfig , *UpperCAmelCase : str , **UpperCAmelCase : str ):
super().__init__(__lowercase , *__lowercase , **__lowercase )
A_ = config.num_labels
A_ = TFRegNetMainLayer(__lowercase , name="regnet" )
# classification head
A_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self : Union[str, Any] , UpperCAmelCase : tf.Tensor = None , UpperCAmelCase : tf.Tensor = None , UpperCAmelCase : bool = None , UpperCAmelCase : bool = None , UpperCAmelCase : List[Any]=False , ):
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = self.regnet(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase )
A_ = outputs.pooler_output if return_dict else outputs[1]
A_ = self.classifier[0](__lowercase )
A_ = self.classifier[1](__lowercase )
A_ = None if labels is None else self.hf_compute_loss(labels=__lowercase , logits=__lowercase )
if not return_dict:
A_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states )
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__a :List[str] = get_logger(__name__)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=None ):
A_ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self , UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
A_ = module._original_module if isinstance(UpperCAmelCase , _PatchedModuleObj ) else module
class _a :
"""simple docstring"""
_lowerCamelCase : Any = []
def __init__( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any]=None ):
A_ = obj
A_ = target
A_ = new
A_ = target.split("." )[0]
A_ = {}
A_ = attrs or []
def __enter__( self : str ):
A_ = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCAmelCase ) ):
try:
A_ = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ = getattr(self.obj , UpperCAmelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ = obj_attr
# patch at top level
setattr(self.obj , UpperCAmelCase , _PatchedModuleObj(UpperCAmelCase , attrs=self.attrs ) )
A_ = getattr(self.obj , UpperCAmelCase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCAmelCase , UpperCAmelCase , _PatchedModuleObj(getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , attrs=self.attrs ) )
A_ = getattr(UpperCAmelCase , UpperCAmelCase )
# finally set the target attribute
setattr(UpperCAmelCase , UpperCAmelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ = getattr(import_module(".".join(UpperCAmelCase ) ) , UpperCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCAmelCase ) is attr_value:
A_ = getattr(self.obj , UpperCAmelCase )
setattr(self.obj , UpperCAmelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
A_ = globals()['__builtins__'][target_attr]
setattr(self.obj , UpperCAmelCase , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self : List[str] , *UpperCAmelCase : Union[str, Any] ):
for attr in list(self.original ):
setattr(self.obj , UpperCAmelCase , self.original.pop(UpperCAmelCase ) )
def __A ( self : Union[str, Any] ):
self.__enter__()
self._active_patches.append(self )
def __A ( self : Any ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a :int = logging.get_logger(__name__)
__a :Optional[int] = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _a ( lowerCAmelCase_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = """data2vec-text"""
def __init__( self : Optional[int] , UpperCAmelCase : List[Any]=30522 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : Any=12 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Union[str, Any]=3072 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[int]=512 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Tuple=1E-12 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : Dict=0 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[Any]="absolute" , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : int=None , **UpperCAmelCase : Dict , ):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = classifier_dropout
class _a ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def __A ( self : List[str] ):
if self.task == "multiple-choice":
A_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :int = logging.get_logger(__name__)
__a :Union[str, Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = 'autoformer'
_lowerCamelCase : str = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Union[str, Any] , UpperCAmelCase : List[str] = None , UpperCAmelCase : List[str] = None , UpperCAmelCase : Optional[Any] = "student_t" , UpperCAmelCase : Optional[int] = "nll" , UpperCAmelCase : Dict = 1 , UpperCAmelCase : List[str] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase : Any = True , UpperCAmelCase : Any = 0 , UpperCAmelCase : Dict = 0 , UpperCAmelCase : Union[str, Any] = 0 , UpperCAmelCase : Any = 0 , UpperCAmelCase : List[str] = None , UpperCAmelCase : str = None , UpperCAmelCase : Tuple = 64 , UpperCAmelCase : List[str] = 2 , UpperCAmelCase : Optional[Any] = 2 , UpperCAmelCase : List[Any] = 2 , UpperCAmelCase : Optional[Any] = 2 , UpperCAmelCase : str = 32 , UpperCAmelCase : Dict = 32 , UpperCAmelCase : Any = "gelu" , UpperCAmelCase : str = 0.1 , UpperCAmelCase : Optional[int] = 0.1 , UpperCAmelCase : Optional[int] = 0.1 , UpperCAmelCase : List[str] = 0.1 , UpperCAmelCase : Optional[int] = 0.1 , UpperCAmelCase : List[str] = 100 , UpperCAmelCase : Any = 0.02 , UpperCAmelCase : Union[str, Any] = True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[int] = 10 , UpperCAmelCase : Optional[Any] = 25 , UpperCAmelCase : int = 3 , **UpperCAmelCase : int , ):
# time series specific configuration
A_ = prediction_length
A_ = context_length if context_length is not None else prediction_length
A_ = distribution_output
A_ = loss
A_ = input_size
A_ = num_time_features
A_ = lags_sequence
A_ = scaling
A_ = num_dynamic_real_features
A_ = num_static_real_features
A_ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A_ = cardinality
else:
A_ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A_ = embedding_dimension
else:
A_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ = num_parallel_samples
# Transformer architecture configuration
A_ = input_size * len(self.lags_sequence ) + self._number_of_features
A_ = d_model
A_ = encoder_attention_heads
A_ = decoder_attention_heads
A_ = encoder_ffn_dim
A_ = decoder_ffn_dim
A_ = encoder_layers
A_ = decoder_layers
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = activation_function
A_ = init_std
A_ = use_cache
# Autoformer
A_ = label_length
A_ = moving_average
A_ = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Any ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 329 | 0 |
__a :Optional[Any] = 6_5521
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = 1
A_ = 0
for plain_chr in plain_text:
A_ = (a + ord(_lowerCAmelCase )) % MOD_ADLER
A_ = (b + a) % MOD_ADLER
return (b << 16) | a
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ):
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
from jiwer import compute_measures
import datasets
__a :Optional[int] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Optional[int] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :List[str] = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __A ( self : str , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=False ):
if concatenate_texts:
return compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["wer"]
else:
A_ = 0
A_ = 0
for prediction, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 355 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a :Any = 16
__a :Any = 32
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : int = 16 ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" )
def tokenize_function(__UpperCamelCase : Any ):
# max_length=None => use the model max length (it's actually the default)
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ = 16
elif accelerator.mixed_precision != "no":
A_ = 8
else:
A_ = None
return tokenizer.pad(
__UpperCamelCase ,padding="longest" ,max_length=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_tensors="pt" ,)
# Instantiate dataloaders.
A_ = DataLoader(
tokenized_datasets["train"] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
A_ = DataLoader(
tokenized_datasets["validation"] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a :Union[str, Any] = mocked_dataloaders # noqa: F811
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__UpperCamelCase ) == "1":
A_ = 2
# Initialize accelerator
A_ = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ = config['lr']
A_ = int(config["num_epochs"] )
A_ = int(config["seed"] )
A_ = int(config["batch_size"] )
A_ = evaluate.load("glue" ,"mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__UpperCamelCase )
def inner_training_loop(__UpperCamelCase : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ = model.to(accelerator.device )
# Instantiate optimizer
A_ = AdamW(params=model.parameters() ,lr=__UpperCamelCase )
A_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase )
# Instantiate scheduler
A_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=100 ,num_training_steps=(len(__UpperCamelCase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A_ = model(**__UpperCamelCase )
A_ = outputs.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
A_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__UpperCamelCase ,default=__UpperCamelCase ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
A_ = parser.parse_args()
A_ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__a :List[str] = datasets.load_iris()
__a :Optional[Any] = np.array(data['data'])
__a :str = np.array(data['target'])
__a :List[Any] = data["target_names"]
__a :Union[str, Any] = train_test_split(X, y)
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
return np.linalg.norm(np.array(__UpperCamelCase ) - np.array(__UpperCamelCase ) )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : str=5 ):
"""simple docstring"""
A_ = zip(__UpperCamelCase ,__UpperCamelCase )
# List of distances of all points from the point to be classified
A_ = []
for data_point in data:
A_ = euclidean_distance(data_point[0] ,__UpperCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ = [i[1] for i in sorted(__UpperCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ = Counter(__UpperCamelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
stooge(A__ ,0 ,len(A__ ) - 1 )
return arr
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
A_ , A_ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
A_ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(A__ ,A__ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(A__ ,i + t ,(A__) )
# Recursively sort first 2/3 elements
stooge(A__ ,A__ ,(h - t) )
if __name__ == "__main__":
__a :str = input('Enter numbers separated by a comma:\n').strip()
__a :Optional[Any] = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :int = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : List[str] ):
A_ = fn
def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
return self.fn(*UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( self : Dict , UpperCAmelCase : List[str] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :List[Any] = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _a ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
_lowerCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self : Tuple ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
A_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(UpperCAmelCase__ )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int=0 ):
if str(UpperCAmelCase__ ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase__ )
else:
A_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A_ = 2
A_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , )
A_ = floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert("RGB" ).resize((64, 64) )
A_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __A ( self : Optional[Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __A ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __A ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _a ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase : Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __A ( self : str ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(UpperCAmelCase : Any ):
if isinstance(UpperCAmelCase__ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
A_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
A_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(UpperCAmelCase__ )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = MultiControlNetModel([controlneta, controlneta] )
A_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int=0 ):
if str(UpperCAmelCase__ ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase__ )
else:
A_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A_ = 2
A_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
]
A_ = floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert("RGB" ).resize((64, 64) )
A_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __A ( self : Tuple ):
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
A_ = 10.0
A_ = 4
A_ = self.get_dummy_inputs(UpperCAmelCase__ )
A_ = steps
A_ = scale
A_ = pipe(**UpperCAmelCase__ )[0]
A_ = self.get_dummy_inputs(UpperCAmelCase__ )
A_ = steps
A_ = scale
A_ = pipe(**UpperCAmelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
A_ = self.get_dummy_inputs(UpperCAmelCase__ )
A_ = steps
A_ = scale
A_ = pipe(**UpperCAmelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
A_ = self.get_dummy_inputs(UpperCAmelCase__ )
A_ = steps
A_ = scale
A_ = pipe(**UpperCAmelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __A ( self : List[str] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __A ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __A ( self : List[str] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __A ( self : str ):
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCAmelCase__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Any ):
A_ = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
A_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase__ , controlnet=UpperCAmelCase__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A_ = torch.Generator(device="cpu" ).manual_seed(0 )
A_ = "evil space-punk bird"
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
A_ = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
A_ = pipe(
UpperCAmelCase__ , UpperCAmelCase__ , control_image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="np" , num_inference_steps=50 , strength=0.6 , )
A_ = output.images[0]
assert image.shape == (512, 512, 3)
A_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _a ( UpperCamelCase_ ):
def __A ( self : Dict ):
A_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_a , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(_a , "num_encoder_blocks" ) )
class _a :
def __init__( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=13 , UpperCAmelCase : str=64 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Tuple=[2, 2, 2, 2] , UpperCAmelCase : str=[8, 4, 2, 1] , UpperCAmelCase : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase : Dict=[1, 4, 8, 16] , UpperCAmelCase : Dict=[1, 2, 4, 8] , UpperCAmelCase : int=True , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : List[str]=0.02 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : str=None , ):
A_ = parent
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = num_encoder_blocks
A_ = sr_ratios
A_ = depths
A_ = hidden_sizes
A_ = downsampling_rates
A_ = num_attention_heads
A_ = is_training
A_ = use_labels
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = scope
def __A ( self : Any ):
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self : Dict ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ):
A_ = SegformerModel(config=_a )
model.to(_a )
model.eval()
A_ = model(_a )
A_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ):
A_ = self.num_labels
A_ = SegformerForSemanticSegmentation(_a )
model.to(_a )
model.eval()
A_ = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A_ = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] ):
A_ = 1
A_ = SegformerForSemanticSegmentation(config=_a )
model.to(_a )
model.eval()
A_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_a )
A_ = model(_a , labels=_a )
self.parent.assertGreater(result.loss , 0.0 )
def __A ( self : Tuple ):
A_ = self.prepare_config_and_inputs()
A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_lowerCamelCase : Optional[int] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Any = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase : List[str] = True
_lowerCamelCase : int = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = False
def __A ( self : List[str] ):
A_ = SegformerModelTester(self )
A_ = SegformerConfigTester(self , config_class=_a )
def __A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __A ( self : Any ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_a )
def __A ( self : str ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_a )
@unittest.skip("SegFormer does not use inputs_embeds" )
def __A ( self : List[Any] ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def __A ( self : Tuple ):
pass
def __A ( self : Optional[int] ):
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_a )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = True
A_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_a , _a ) )
A_ = outputs.attentions
A_ = sum(self.model_tester.depths )
self.assertEqual(len(_a ) , _a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_a , _a ) )
A_ = outputs.attentions
self.assertEqual(len(_a ) , _a )
# verify the first attentions (first block, first layer)
A_ = (self.model_tester.image_size // 4) ** 2
A_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A_ = (self.model_tester.image_size // 32) ** 2
A_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A_ = len(_a )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 1 , len(_a ) )
A_ = outputs.attentions
self.assertEqual(len(_a ) , _a )
# verify the first attentions (first block, first layer)
A_ = (self.model_tester.image_size // 4) ** 2
A_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __A ( self : int ):
def check_hidden_states_output(UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ):
A_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_a , _a ) )
A_ = outputs.hidden_states
A_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(_a , _a , _a )
def __A ( self : str ):
if not self.model_tester.is_training:
return
A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_a ):
continue
A_ = model_class(_a )
model.to(_a )
model.train()
A_ = self._prepare_for_class(_a , _a , return_labels=_a )
A_ = model(**_a ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __A ( self : List[Any] ):
pass
@slow
def __A ( self : Dict ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = SegformerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __snake_case ( ):
"""simple docstring"""
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _a ( unittest.TestCase ):
@slow
def __A ( self : Tuple ):
A_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
A_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_a )
A_ = prepare_img()
A_ = image_processor(images=_a , return_tensors="pt" )
A_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
A_ = model(_a )
A_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _a )
A_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __A ( self : Tuple ):
A_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
A_ = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_a )
A_ = prepare_img()
A_ = image_processor(images=_a , return_tensors="pt" )
A_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
A_ = model(_a )
A_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _a )
A_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _a , atol=1E-1 ) )
@slow
def __A ( self : Dict ):
A_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_a , align=_a , do_random_crop=_a )
A_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_a )
A_ = prepare_img()
A_ = image_processor(images=_a , return_tensors="pt" )
A_ = encoded_inputs.pixel_values.to(_a )
with torch.no_grad():
A_ = model(_a )
A_ = outputs.logits.detach().cpu()
A_ = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(500, 300)] )
A_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _a )
A_ = image_processor.post_process_semantic_segmentation(outputs=_a )
A_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _a )
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[Any] ):
A_ = 0
def __A ( self : Dict ):
A_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
A_ = Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self : Optional[int] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
A_ = Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
A_ = Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
A_ = Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop("image_processor_type" )
A_ = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
config.save_pretrained(__SCREAMING_SNAKE_CASE )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
A_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self : List[str] ):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "clip-base is not a local folder and is not a valid model identifier" ):
A_ = AutoImageProcessor.from_pretrained("clip-base" )
def __A ( self : Tuple ):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision="aaaaaa" )
def __A ( self : List[Any] ):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
A_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def __A ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
A_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
A_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
A_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def __A ( self : List[Any] ):
try:
AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
A_ = Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
A_ = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
A_ = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Union[str, Any] ):
class _a ( _A ):
"""simple docstring"""
_lowerCamelCase : List[str] = True
try:
AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
A_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
A_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a :List[Any] = logging.get_logger(__name__)
__a :Union[str, Any] = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'dinat'
_lowerCamelCase : Dict = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : str , UpperCAmelCase : str=4 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=64 , UpperCAmelCase : int=[3, 4, 6, 5] , UpperCAmelCase : Union[str, Any]=[2, 4, 8, 16] , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : int=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase : Any=3.0 , UpperCAmelCase : int=True , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Dict=1E-5 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : int , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = patch_size
A_ = num_channels
A_ = embed_dim
A_ = depths
A_ = len(_SCREAMING_SNAKE_CASE )
A_ = num_heads
A_ = kernel_size
A_ = dilations
A_ = mlp_ratio
A_ = qkv_bias
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = drop_path_rate
A_ = hidden_act
A_ = layer_norm_eps
A_ = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
A_ = layer_scale_init_value
A_ = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 )]
A_ = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
import csv
import tweepy
# Twitter API credentials
__a :Dict = ''
__a :Optional[Any] = ''
__a :int = ''
__a :Optional[Any] = ''
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = tweepy.OAuthHandler(_a ,_a )
auth.set_access_token(_a ,_a )
A_ = tweepy.API(_a )
# initialize a list to hold all the tweepy Tweets
A_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
A_ = api.user_timeline(screen_name=_a ,count=200 )
# save most recent tweets
alltweets.extend(_a )
# save the id of the oldest tweet less one
A_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_a ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
A_ = api.user_timeline(
screen_name=_a ,count=200 ,max_id=_a )
# save most recent tweets
alltweets.extend(_a )
# update the id of the oldest tweet less one
A_ = alltweets[-1].id - 1
print(F'''...{len(_a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
A_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' ,"w" ) as f:
A_ = csv.writer(_a )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(_a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
__a :Tuple = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _a ( snake_case_ ):
"""simple docstring"""
@staticmethod
def __A ( UpperCAmelCase : int ):
A_ = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=_lowercase , required=_lowercase , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=_lowercase , required=_lowercase , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=_lowercase , required=_lowercase , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=_lowercase , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=_lowercase , default=_lowercase , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=_lowercase )
def __init__( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , *UpperCAmelCase : int , ):
A_ = logging.get_logger("transformers-cli/converting" )
self._logger.info(f'''Loading model {model_type}''' )
A_ = model_type
A_ = tf_checkpoint
A_ = pytorch_dump_output
A_ = config
A_ = finetuning_task_name
def __A ( self : Any ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
A_ = self._tf_checkpoint
A_ = ""
else:
A_ = self._tf_checkpoint
A_ = ""
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Optional[Any] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__a :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : int ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] ,)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
A_ = features.copy()
A_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = tmp_path / """cache"""
A_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,split=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = jsonl_path
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = [jsonl_path]
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any]=("train",) ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
for split in splits:
A_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = JsonDatasetReader({"train": jsonl_path} ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = JsonDatasetReader({"train": jsonl_path} ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ):
"""simple docstring"""
if split:
A_ = {split: jsonl_path}
else:
A_ = """train"""
A_ = {"""train""": jsonl_path, """test""": jsonl_path}
A_ = tmp_path / """cache"""
A_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
return json.load(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
return [json.loads(__UpperCamelCase ) for line in buffer]
class _a :
"""simple docstring"""
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase ).write()
buffer.seek(0 )
A_ = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase ).write()
buffer.seek(0 )
A_ = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __A ( self : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
A_ = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
A_ = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCamelCase ) == 10
def __A ( self : str , UpperCAmelCase : str ):
with pytest.raises(__lowerCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __A ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str ):
A_ = tmp_path_factory.mktemp("data" ) / f'''test.json.{extension}'''
A_ = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , compression=__lowerCamelCase ).write()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
A_ = f.read()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
A_ = f.read()
assert exported_content == original_content
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from math import pi, sqrt
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
if num > 171.5:
raise OverflowError("math range error" )
elif num - int(a__ ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
return sqrt(a__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __snake_case ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(a__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__a :Optional[int] = 1.0
while num:
__a :List[str] = float(input('Gamma of: '))
print(F"gamma({num}) = {gamma(num)}")
print('\nEnter 0 to exit...')
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 329 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = StableDiffusionDiffEditPipeline
_lowerCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
_lowerCamelCase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase : Union[str, Any] = frozenset([] )
def __A ( self : Any ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
A_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
A_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
A_ = CLIPTextModel(UpperCAmelCase )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=0 ):
A_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=0 ):
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Any=0 ):
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def __A ( self : List[str] ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
A_ = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = pipe_loaded(**UpperCAmelCase )[0]
A_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCAmelCase , 1E-4 )
def __A ( self : List[Any] ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_mask_inputs(UpperCAmelCase )
A_ = pipe.generate_mask(**UpperCAmelCase )
A_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
A_ = np.array([0] * 9 )
A_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __A ( self : Tuple ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inversion_inputs(UpperCAmelCase )
A_ = pipe.invert(**UpperCAmelCase ).images
A_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
A_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def __A ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __A ( self : Dict ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = {"beta_start": 0.00_085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
A_ = DPMSolverMultistepScheduler(**UpperCAmelCase )
A_ = DPMSolverMultistepInverseScheduler(**UpperCAmelCase )
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inversion_inputs(UpperCAmelCase )
A_ = pipe.invert(**UpperCAmelCase ).images
A_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
A_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Tuple ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __A ( cls : Optional[Any] ):
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
A_ = raw_image.convert("RGB" ).resize((768, 768) )
A_ = raw_image
def __A ( self : List[str] ):
A_ = torch.manual_seed(0 )
A_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
A_ = DDIMScheduler.from_config(pipe.scheduler.config )
A_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "a bowl of fruit"
A_ = "a bowl of pears"
A_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
A_ = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase ).latents
A_ = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
A_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __A ( self : Tuple ):
A_ = torch.manual_seed(0 )
A_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "a bowl of fruit"
A_ = "a bowl of pears"
A_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
A_ = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents
A_ = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
A_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :str = get_logger()
__a :Union[str, Any] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=None , **UpperCAmelCase : Tuple ):
super().__init__(features=_SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(_SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(_SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def __A ( self : str , UpperCAmelCase : Optional[int] ):
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_SCREAMING_SNAKE_CASE , axis=0 )
return column
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] ):
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , (str, bytes, type(_SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(_SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ = {'''dtype''': jnp.intaa}
else:
A_ = {'''dtype''': jnp.intaa}
elif isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ = {'''dtype''': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
A_ = np.asarray(_SCREAMING_SNAKE_CASE )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : List[Any] , UpperCAmelCase : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_SCREAMING_SNAKE_CASE , "__array__" ) and not isinstance(_SCREAMING_SNAKE_CASE , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] ):
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def __A ( self : List[Any] , UpperCAmelCase : Optional[int] ):
A_ = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
A_ = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def __A ( self : Optional[Any] , UpperCAmelCase : Any ):
A_ = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
A_ = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
A_ = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
A_ = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def __A ( self : int , UpperCAmelCase : Optional[int] ):
A_ = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
A_ = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
A_ = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
import math
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : Union[str, Any]=0 ): # a graph with Node 0,1,...,N-1
A_ = n
A_ = [
[math.inf for j in range(0 , UpperCAmelCase )] for i in range(0 , UpperCAmelCase )
] # adjacency matrix for weight
A_ = [
[math.inf for j in range(0 , UpperCAmelCase )] for i in range(0 , UpperCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def __A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
A_ = w
def __A ( self : str ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A_ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] ):
return self.dp[u][v]
if __name__ == "__main__":
__a :int = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Optional[int] = logging.get_logger(__name__)
__a :Union[str, Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_lowerCamelCase : Tuple = "pegasus"
_lowerCamelCase : Tuple = ["past_key_values"]
_lowerCamelCase : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=50265 , UpperCAmelCase : Dict=1024 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : int=4096 , UpperCAmelCase : int=16 , UpperCAmelCase : int=12 , UpperCAmelCase : Tuple=4096 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : str=0.0 , UpperCAmelCase : int=0.0 , UpperCAmelCase : int=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[int]=1024 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : Any=0 , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Any=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : List[Any]=1 , **UpperCAmelCase : Union[str, Any] , ):
A_ = vocab_size
A_ = max_position_embeddings
A_ = d_model
A_ = encoder_ffn_dim
A_ = encoder_layers
A_ = encoder_attention_heads
A_ = decoder_ffn_dim
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = activation_function
A_ = init_std
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = use_cache
A_ = encoder_layers
A_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , **_snake_case , )
@property
def __A ( self : Any ):
return self.encoder_attention_heads
@property
def __A ( self : Optional[Any] ):
return self.d_model
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = StableUnCLIPPipeline
_lowerCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase : List[Any] = False
def __A ( self : int ):
A_ = 32
A_ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
A_ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A_ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
A_ = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCAmelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
A_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
A_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
A_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
A_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
A_ = AutoencoderKL()
A_ = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __A ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : Optional[int] ):
A_ = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
A_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
A_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ = torch.Generator(device="cpu" ).manual_seed(0 )
A_ = pipe("anime turle" , generator=UpperCAmelCase , output_type="np" )
A_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def __A ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
A_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
A_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _a ( _lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase : Any = 4_2
_lowerCamelCase : Tuple = jnp.floataa
_lowerCamelCase : Any = True
def __A ( self : Optional[Any] ):
super().setup()
A_ = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ):
A_ = super().__call__(*UpperCAmelCase , **UpperCAmelCase )
A_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class _a ( _lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = FlaxBigBirdForNaturalQuestionsModule
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
def cross_entropy(__UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=None ):
A_ = logits.shape[-1]
A_ = (labels[..., None] == jnp.arange(UpperCAmelCase_ )[None]).astype("f4" )
A_ = jax.nn.log_softmax(UpperCAmelCase_ ,axis=-1 )
A_ = -jnp.sum(labels * logits ,axis=-1 )
if reduction is not None:
A_ = reduction(UpperCAmelCase_ )
return loss
A_ = partial(UpperCAmelCase_ ,reduction=jnp.mean )
A_ = cross_entropy(UpperCAmelCase_ ,UpperCAmelCase_ )
A_ = cross_entropy(UpperCAmelCase_ ,UpperCAmelCase_ )
A_ = cross_entropy(UpperCAmelCase_ ,UpperCAmelCase_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'google/bigbird-roberta-base'
_lowerCamelCase : str = 3_0_0_0
_lowerCamelCase : Optional[int] = 1_0_5_0_0
_lowerCamelCase : Union[str, Any] = 1_2_8
_lowerCamelCase : List[str] = 3
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : Any = 5
# tx_args
_lowerCamelCase : Optional[Any] = 3e-5
_lowerCamelCase : str = 0.0
_lowerCamelCase : Optional[Any] = 2_0_0_0_0
_lowerCamelCase : List[str] = 0.0_0_9_5
_lowerCamelCase : Optional[int] = 'bigbird-roberta-natural-questions'
_lowerCamelCase : List[Any] = 'training-expt'
_lowerCamelCase : Optional[int] = 'data/nq-training.jsonl'
_lowerCamelCase : Union[str, Any] = 'data/nq-validation.jsonl'
def __A ( self : Optional[int] ):
os.makedirs(self.base_dir , exist_ok=UpperCAmelCase )
A_ = os.path.join(self.base_dir , self.save_dir )
A_ = self.batch_size_per_device * jax.device_count()
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : List[str] = 4_2
_lowerCamelCase : int = 4_0_9_6 # no dynamic padding on TPUs
def __call__( self : Union[str, Any] , UpperCAmelCase : Optional[int] ):
A_ = self.collate_fn(UpperCAmelCase )
A_ = jax.tree_util.tree_map(UpperCAmelCase , UpperCAmelCase )
return batch
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] ):
A_ , A_ = self.fetch_inputs(features["input_ids"] )
A_ = {
"input_ids": jnp.array(UpperCAmelCase , dtype=jnp.intaa ),
"attention_mask": jnp.array(UpperCAmelCase , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def __A ( self : List[Any] , UpperCAmelCase : list ):
A_ = [self._fetch_inputs(UpperCAmelCase ) for ids in input_ids]
return zip(*UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : list ):
A_ = [1 for _ in range(len(UpperCAmelCase ) )]
while len(UpperCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : List[Any]=None ):
"""simple docstring"""
if seed is not None:
A_ = dataset.shuffle(seed=UpperCAmelCase_ )
for i in range(len(UpperCAmelCase_ ) // batch_size ):
A_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCAmelCase_ )
@partial(jax.pmap ,axis_name="batch" )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,**__UpperCamelCase : Optional[int] ):
"""simple docstring"""
def loss_fn(__UpperCamelCase : Tuple ):
A_ = model_inputs.pop("start_labels" )
A_ = model_inputs.pop("end_labels" )
A_ = model_inputs.pop("pooled_labels" )
A_ = state.apply_fn(**UpperCAmelCase_ ,params=UpperCAmelCase_ ,dropout_rng=UpperCAmelCase_ ,train=UpperCAmelCase_ )
A_ , A_ , A_ = outputs
return state.loss_fn(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,)
A_ , A_ = jax.random.split(UpperCAmelCase_ )
A_ = jax.value_and_grad(UpperCAmelCase_ )
A_ , A_ = grad_fn(state.params )
A_ = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
A_ = jax.lax.pmean(UpperCAmelCase_ ,"batch" )
A_ = state.apply_gradients(grads=UpperCAmelCase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="batch" )
def __snake_case ( __UpperCamelCase : List[Any] ,**__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = model_inputs.pop("start_labels" )
A_ = model_inputs.pop("end_labels" )
A_ = model_inputs.pop("pooled_labels" )
A_ = state.apply_fn(**UpperCAmelCase_ ,params=state.params ,train=UpperCAmelCase_ )
A_ , A_ , A_ = outputs
A_ = state.loss_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
A_ = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
return metrics
class _a ( train_state.TrainState ):
"""simple docstring"""
_lowerCamelCase : Any = struct.field(pytree_node=_lowerCamelCase )
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : str = 4_2
_lowerCamelCase : Optional[Any] = 4_2
_lowerCamelCase : int = 4_2
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : List[str] = 4_2
_lowerCamelCase : Optional[int] = None
def __A ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=None ):
A_ = model.params
A_ = TrainState.create(
apply_fn=model.__call__ , params=UpperCAmelCase , tx=UpperCAmelCase , loss_fn=UpperCAmelCase , )
if ckpt_dir is not None:
A_ , A_ , A_ , A_ , A_ = restore_checkpoint(UpperCAmelCase , UpperCAmelCase )
A_ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
A_ , A_ = build_tx(**UpperCAmelCase )
A_ = train_state.TrainState(
step=UpperCAmelCase , apply_fn=model.__call__ , params=UpperCAmelCase , tx=UpperCAmelCase , opt_state=UpperCAmelCase , )
A_ = args
A_ = data_collator
A_ = lr
A_ = params
A_ = jax_utils.replicate(UpperCAmelCase )
return state
def __A ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
A_ = self.args
A_ = len(UpperCAmelCase ) // args.batch_size
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(UpperCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = get_batched_dataset(UpperCAmelCase , args.batch_size , seed=UpperCAmelCase )
A_ = 0
for batch in tqdm(UpperCAmelCase , total=UpperCAmelCase , desc=f'''Running EPOCH-{epoch}''' ):
A_ = self.data_collator(UpperCAmelCase )
A_ , A_ , A_ = self.train_step_fn(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
A_ = jax_utils.unreplicate(state.step )
A_ = running_loss.item() / i
A_ = self.scheduler_fn(state_step - 1 )
A_ = self.evaluate(UpperCAmelCase , UpperCAmelCase )
A_ = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(UpperCAmelCase ) )
self.logger.log(UpperCAmelCase , commit=UpperCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ):
A_ = get_batched_dataset(UpperCAmelCase , self.args.batch_size )
A_ = len(UpperCAmelCase ) // self.args.batch_size
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = 0
for batch in tqdm(UpperCAmelCase , total=UpperCAmelCase , desc="Evaluating ... " ):
A_ = self.data_collator(UpperCAmelCase )
A_ = self.val_step_fn(UpperCAmelCase , **UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict ):
A_ = jax_utils.unreplicate(UpperCAmelCase )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " )
self.model_save_fn(UpperCAmelCase , params=state.params )
with open(os.path.join(UpperCAmelCase , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(UpperCAmelCase , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(UpperCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(UpperCAmelCase , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , UpperCAmelCase )
print("DONE" )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' ,end=" ... " )
with open(os.path.join(UpperCAmelCase_ ,"flax_model.msgpack" ) ,"rb" ) as f:
A_ = from_bytes(state.params ,f.read() )
with open(os.path.join(UpperCAmelCase_ ,"opt_state.msgpack" ) ,"rb" ) as f:
A_ = from_bytes(state.opt_state ,f.read() )
A_ = joblib.load(os.path.join(UpperCAmelCase_ ,"args.joblib" ) )
A_ = joblib.load(os.path.join(UpperCAmelCase_ ,"data_collator.joblib" ) )
with open(os.path.join(UpperCAmelCase_ ,"training_state.json" ) ,"r" ) as f:
A_ = json.load(UpperCAmelCase_ )
A_ = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = num_train_steps - warmup_steps
A_ = optax.linear_schedule(init_value=UpperCAmelCase_ ,end_value=UpperCAmelCase_ ,transition_steps=UpperCAmelCase_ )
A_ = optax.linear_schedule(init_value=UpperCAmelCase_ ,end_value=1E-7 ,transition_steps=UpperCAmelCase_ )
A_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
return lr
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
def weight_decay_mask(__UpperCamelCase : Union[str, Any] ):
A_ = traverse_util.flatten_dict(UpperCAmelCase_ )
A_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCAmelCase_ )
A_ = scheduler_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
A_ = optax.adamw(learning_rate=UpperCAmelCase_ ,weight_decay=UpperCAmelCase_ ,mask=UpperCAmelCase_ )
return tx, lr
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 329 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
__a :List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__a :Tuple = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__a :Tuple = os.environ.get('USER_TOKEN', '')
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = {
"Authorization": f'''token {auth_token}''',
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_lowerCAmelCase ,headers=_lowerCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ):
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
A_ = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
A_ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
A_ = v
else:
A_ = v
A_ = chkpt["params"]
A_ = {n: v for n, v in config.items() if not isinstance(__UpperCamelCase ,(torch.FloatTensor, numpy.ndarray) )}
A_ = chkpt["dico_word2id"]
A_ = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" ,"" ): i for s, i in vocab.items()}
# Save pytorch-model
A_ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
A_ = pytorch_dump_folder_path + "/" + CONFIG_NAME
A_ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__UpperCamelCase ,__UpperCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(__UpperCamelCase ,indent=2 ) + "\n" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(__UpperCamelCase ,indent=2 ) + "\n" )
if __name__ == "__main__":
__a :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 355 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
__a :Union[str, Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = set()
# keep track of all the paths to be checked
A_ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
A_ = queue.pop(0 )
# get the last node from the path
A_ = path[-1]
if node not in explored:
A_ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
A_ = list(__UpperCAmelCase )
new_path.append(__UpperCAmelCase )
queue.append(__UpperCAmelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCAmelCase )
# in case there's no path between the 2 nodes
return []
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
A_ = [start]
A_ = set(__UpperCAmelCase )
# Keep tab on distances from `start` node.
A_ = {start: 0, target: -1}
while queue:
A_ = queue.pop(0 )
if node == target:
A_ = (
dist[node] if dist[target] == -1 else min(dist[target] ,dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCAmelCase )
queue.append(__UpperCAmelCase )
A_ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : List[str] ):
A_ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
A_ = len(lowerCamelCase__ ) - 1
def __A ( self : str , UpperCAmelCase : Tuple ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
A_ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , lowerCamelCase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCamelCase__ ) , 5 ) == 1
return output_values
def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
A_ = self.basis_function(lowerCamelCase__ )
A_ = 0.0
A_ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self : int , UpperCAmelCase : List[Any] = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
A_ = [] # x coordinates of points to plot
A_ = [] # y coordinates of points to plot
A_ = 0.0
while t <= 1:
A_ = self.bezier_curve_function(lowerCamelCase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
A_ = [i[0] for i in self.list_of_points]
A_ = [i[1] for i in self.list_of_points]
plt.plot(
lowerCamelCase__ , lowerCamelCase__ , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(lowerCamelCase__ , lowerCamelCase__ , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
from math import pow, sqrt
def __snake_case ( *__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = len(A_ ) > 0 and all(value > 0.0 for value in values )
return result
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(A_ ,A_ )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(A_ ,A_ ,A_ )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(A_ ,A_ ,A_ )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a ,2 ) ,6 )
if validate(A_ ,A_ ,A_ )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a ,2 ) / molar_mass ,6 )
if validate(A_ ,A_ ,A_ )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
import re
def __snake_case ( __UpperCamelCase : Dict ):
if len(re.findall("[ATCG]" ,_lowercase ) ) != len(_lowercase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : List[str] ):
A_ = fn
def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
return self.fn(*UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( self : Dict , UpperCAmelCase : List[str] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Union[str, Any]=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : str ):
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE )
def __A ( self : str ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Optional[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , )
for _ in range(1000 ):
A_ = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : str = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Union[str, Any] = 1_0
def __A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any=None ):
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE )
def __A ( self : List[str] ):
A_ = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ = data
A_ = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListAlmostEqual(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Any ):
A_ = fn
def __call__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : str ):
return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( self : Dict , UpperCAmelCase : Optional[int] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__a :Optional[Any] = 5_0000
__a :Optional[int] = 5000
__a , __a :Optional[int] = os.path.split(__file__)
__a :Any = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def __snake_case ( __UpperCamelCase : datasets.Dataset ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for i in range(_a ):
A_ = dataset[i]
@get_duration
def __snake_case ( __UpperCamelCase : datasets.Dataset ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for i in range(0 ,len(_a ) ,_a ):
A_ = dataset[i : i + batch_size]
@get_duration
def __snake_case ( __UpperCamelCase : datasets.Dataset ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
with dataset.formatted_as(type=_a ):
for i in range(_a ):
A_ = dataset[i]
@get_duration
def __snake_case ( __UpperCamelCase : datasets.Dataset ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
with dataset.formatted_as(type=_a ):
for i in range(0 ,_a ,_a ):
A_ = dataset[i : i + batch_size]
def __snake_case ( ):
"""simple docstring"""
A_ = {"num examples": SPEED_TEST_N_EXAMPLES}
A_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
A_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
A_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
A_ = generate_example_dataset(
os.path.join(_a ,"dataset.arrow" ) ,_a ,num_examples=_a ,seq_shapes={"list": (100,)} ,)
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ ,str(_a ) )
A_ = func(_a ,**_a )
print("shuffling dataset" )
A_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " ,func.__name__ ,str(_a ) )
A_ = func(
_a ,**_a )
with open(_a ,"wb" ) as f:
f.write(json.dumps(_a ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( lowerCamelCase__ ):
_lowerCamelCase : List[str] = (DDIMParallelScheduler,)
_lowerCamelCase : Dict = (("""eta""", 0.0), ("""num_inference_steps""", 5_0))
def __A ( self : List[str] , **UpperCAmelCase : Optional[int] ):
A_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**snake_case__ )
return config
def __A ( self : List[str] , **UpperCAmelCase : Optional[int] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**snake_case__ )
A_ = scheduler_class(**snake_case__ )
A_ = 10, 0.0
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for t in scheduler.timesteps:
A_ = model(snake_case__ , snake_case__ )
A_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def __A ( self : str ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def __A ( self : Dict ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case__ )
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(steps_offset=1 )
A_ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __A ( self : List[str] ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def __A ( self : List[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def __A ( self : str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def __A ( self : List[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def __A ( self : int ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=snake_case__ )
def __A ( self : Dict ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=snake_case__ )
def __A ( self : Dict ):
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def __A ( self : int ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=snake_case__ )
def __A ( self : Optional[int] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=snake_case__ , num_inference_steps=snake_case__ )
def __A ( self : Any ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=snake_case__ , eta=snake_case__ )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def __A ( self : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**snake_case__ )
A_ = 10, 0.0
scheduler.set_timesteps(snake_case__ )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ )
A_ = torch.sum(torch.abs(snake_case__ ) )
A_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def __A ( self : Optional[Any] ):
A_ = self.full_loop()
A_ = torch.sum(torch.abs(snake_case__ ) )
A_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def __A ( self : List[Any] ):
A_ = self.full_loop(prediction_type="v_prediction" )
A_ = torch.sum(torch.abs(snake_case__ ) )
A_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def __A ( self : List[str] ):
A_ = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
A_ = torch.sum(torch.abs(snake_case__ ) )
A_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def __A ( self : Any ):
A_ = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
A_ = torch.sum(torch.abs(snake_case__ ) )
A_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
from math import factorial
__a :dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_lowercase ) )
def __snake_case ( __UpperCamelCase : int = 60 ,__UpperCamelCase : int = 100_0000 ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ) or not isinstance(_lowercase ,_lowercase ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
A_ = 0
# the cached sizes of the previous chains
A_ = {}
for start_chain_element in range(1 ,_lowercase ):
# The temporary set will contain the elements of the chain
A_ = set()
A_ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
A_ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_lowercase )
chain_set_length += 1
A_ = digit_factorial_sum(_lowercase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
A_ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Union[str, Any] = logging.get_logger(__name__)
__a :int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class _a ( lowerCAmelCase__ ):
"""simple docstring"""
_lowerCamelCase : Dict = "dpr"
def __init__( self : str , UpperCAmelCase : Tuple=30522 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : List[Any]=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=512 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : str=0.02 , UpperCAmelCase : List[Any]=1E-12 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : List[str] = 0 , **UpperCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=a__ , **a__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = layer_norm_eps
A_ = projection_dim
A_ = position_embedding_type
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
import os
import sys
import unittest
__a :List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a :Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ):
A_ = find_backend(" if not is_torch_available():" )
self.assertEqual(lowerCAmelCase_ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A_ = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(lowerCAmelCase_ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A_ = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(lowerCAmelCase_ , "torch_and_transformers_and_onnx" )
def __A ( self : Optional[Any] ):
A_ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , lowerCAmelCase_ )
self.assertIn("torch_and_transformers" , lowerCAmelCase_ )
self.assertIn("flax_and_transformers" , lowerCAmelCase_ )
self.assertIn("torch_and_transformers_and_onnx" , lowerCAmelCase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def __A ( self : Any ):
A_ = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(lowerCAmelCase_ , "\nCONSTANT = None\n" )
A_ = create_dummy_object("function" , "'torch'" )
self.assertEqual(
lowerCAmelCase_ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A_ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A_ = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __A ( self : Tuple ):
A_ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , lowerCAmelCase_ )
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__a :Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__a :Union[str, Any] = TaTokenizerFast
__a :Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__a :Any = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _a :
"""simple docstring"""
_lowerCamelCase : Dict = XGLMConfig
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = 'gelu'
def __init__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=14 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int=99 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Any=2 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : str=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : int=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=0.02 , ):
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_labels
A_ = vocab_size
A_ = d_model
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = ffn_dim
A_ = activation_function
A_ = activation_dropout
A_ = attention_dropout
A_ = max_position_embeddings
A_ = initializer_range
A_ = None
A_ = 0
A_ = 2
A_ = 1
def __A ( self : Union[str, Any] ):
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def __A ( self : Any ):
A_ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = self.get_config()
A_ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __A ( self : List[str] ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
def __A ( self : Any ):
A_ = self.prepare_config_and_inputs()
(
A_
) = config_and_inputs
A_ = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class _a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowerCamelCase : Tuple = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowerCamelCase : Optional[Any] = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Union[str, Any] = False
def __A ( self : Optional[int] ):
A_ = TFXGLMModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=37 )
def __A ( self : str ):
self.config_tester.run_common_tests()
@slow
def __A ( self : Optional[int] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFXGLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def __A ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : str , UpperCAmelCase : Any=True ):
A_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
A_ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
A_ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
A_ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
@slow
def __A ( self : Optional[Any] ):
A_ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
A_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
A_ = tokenizer("Today is a nice day and" , return_tensors="tf" )
A_ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
A_ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0] )
A_ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ )
A_ = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __A ( self : Dict ):
A_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
A_ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
A_ = '''left'''
# use different length sentences to test batching
A_ = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
A_ = tokenizer(UpperCamelCase__ , return_tensors="tf" , padding=UpperCamelCase__ )
A_ = inputs['''input_ids''']
A_ = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
A_ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
A_ = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=12 )
A_ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
A_ = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=12 )
A_ = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
A_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
A_ = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = args.pruning_method
A_ = args.threshold
A_ = args.model_name_or_path.rstrip("/" )
A_ = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
A_ = torch.load(os.path.join(lowerCamelCase_ ,"pytorch_model.bin" ) )
A_ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A_ = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
A_ = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
A_ = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
A_ = MagnitudeBinarizer.apply(inputs=lowerCamelCase_ ,threshold=lowerCamelCase_ )
A_ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A_ = name[:-6]
A_ = model[f'''{prefix_}mask_scores''']
A_ = TopKBinarizer.apply(lowerCamelCase_ ,lowerCamelCase_ )
A_ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A_ = name[:-6]
A_ = model[f'''{prefix_}mask_scores''']
A_ = ThresholdBinarizer.apply(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
A_ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A_ = name[:-6]
A_ = model[f'''{prefix_}mask_scores''']
A_ = -0.1, 1.1
A_ = torch.sigmoid(lowerCamelCase_ )
A_ = s * (r - l) + l
A_ = s_bar.clamp(min=0.0 ,max=1.0 )
A_ = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
A_ = os.path.join(
os.path.dirname(lowerCamelCase_ ) ,f'''bertarized_{os.path.basename(lowerCamelCase_ )}''' )
if not os.path.isdir(lowerCamelCase_ ):
shutil.copytree(lowerCamelCase_ ,lowerCamelCase_ )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(lowerCamelCase_ ,os.path.join(lowerCamelCase_ ,"pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
__a :int = parser.parse_args()
main(args)
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [[0 for _ in range(__UpperCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
A_ = 1
for n in range(m + 1 ):
for k in range(1 ,__UpperCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__a :int = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__a :List[Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 329 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__a :Dict = getLogger(__name__)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int = 8 ,__UpperCamelCase : int = 1024 ,__UpperCamelCase : Union[str, Any]="val" ,__UpperCamelCase : Any=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : List[str]="summarization" ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Dict=1 ,__UpperCamelCase : Dict = None ,__UpperCamelCase : List[Any]="" ,**__UpperCamelCase : int ,):
"""simple docstring"""
A_ = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" ,rank=snake_case_ )
A_ = Path(snake_case_ )
A_ = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
A_ = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
A_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_ ,snake_case_ ) # update config with task specific params
A_ = generate_kwargs.pop("num_beams" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
A_ = num_return_sequences
A_ = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
A_ = tokenizer.model_max_length
if prefix is None:
A_ = prefix or getattr(model.config ,"prefix" ,"" ) or ""
A_ = SeqaSeqDataset(
snake_case_ ,snake_case_ ,snake_case_ ,max_target_length=1024 ,type_path=snake_case_ ,n_obs=snake_case_ ,prefix=snake_case_ ,**snake_case_ ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
A_ = ds.make_sortish_sampler(snake_case_ ,distributed=snake_case_ ,add_extra_examples=snake_case_ ,shuffle=snake_case_ )
A_ = DataLoader(snake_case_ ,sampler=snake_case_ ,batch_size=snake_case_ ,collate_fn=ds.collate_fn )
A_ = []
for batch in tqdm(snake_case_ ):
A_ = model.generate(
input_ids=batch["input_ids"].to(model.device ) ,attention_mask=batch["attention_mask"].to(model.device ) ,num_return_sequences=snake_case_ ,num_beams=snake_case_ ,**snake_case_ ,)
A_ = tokenizer.batch_decode(snake_case_ ,skip_special_tokens=snake_case_ ,clean_up_tokenization_spaces=snake_case_ )
A_ = batch["ids"]
if num_return_sequences > 1:
A_ = chunks(snake_case_ ,snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(snake_case_ ,snake_case_ )
return results, sampler.num_replicas
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" ,type=snake_case_ ,help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" ,type=snake_case_ ,help="like facebook/bart-large-cnn,t5-base, etc." ,default="sshleifer/distilbart-xsum-12-3" ,)
parser.add_argument("--save_dir" ,type=snake_case_ ,help="where to save" ,default="tmp_gen" )
parser.add_argument("--max_source_length" ,type=snake_case_ ,default=snake_case_ )
parser.add_argument(
"--type_path" ,type=snake_case_ ,default="test" ,help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" ,type=snake_case_ ,default="summarization" ,help="used for task_specific_params + metrics" )
parser.add_argument("--bs" ,type=snake_case_ ,default=8 ,required=snake_case_ ,help="batch size" )
parser.add_argument(
"--local_rank" ,type=snake_case_ ,default=-1 ,required=snake_case_ ,help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" ,type=snake_case_ ,default=snake_case_ ,required=snake_case_ ,help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" ,type=snake_case_ ,default=1 ,required=snake_case_ ,help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" ,type=snake_case_ ,default=600 ,required=snake_case_ ,help="How long should master process wait for other processes to finish." ,)
parser.add_argument("--src_lang" ,type=snake_case_ ,default=snake_case_ ,required=snake_case_ )
parser.add_argument("--tgt_lang" ,type=snake_case_ ,default=snake_case_ ,required=snake_case_ )
parser.add_argument(
"--prefix" ,type=snake_case_ ,required=snake_case_ ,default=snake_case_ ,help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" ,action="store_true" )
parser.add_argument("--debug" ,action="store_true" )
A_ = time.time()
A_ , A_ = parser.parse_known_args()
A_ = parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
A_ = Path(args.save_dir + "_tmp" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
A_ = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
A_ = {}
if args.src_lang is not None:
A_ = args.src_lang
if args.tgt_lang is not None:
A_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
A_ , A_ = eval_data_dir(
args.data_dir ,snake_case_ ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=snake_case_ ,**snake_case_ ,)
if args.local_rank <= 0:
A_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
A_ = gather_results_from_each_node(snake_case_ ,snake_case_ ,args.sync_timeout )
A_ = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
A_ = save_dir.joinpath("pseudolabel_results.json" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(snake_case_ ,snake_case_ )
return
A_ = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(snake_case_ ) as f:
A_ = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
A_ = "translation" in args.task
A_ = calculate_bleu if calc_bleu else calculate_rouge
A_ = "bleu" if calc_bleu else "rouge"
A_ = score_fn(snake_case_ ,snake_case_ )
A_ = len(snake_case_ )
A_ = time.time() - start_time
A_ = round(runtime / metrics["n_obs"] ,4 )
A_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
A_ = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(snake_case_ ,snake_case_ ,indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_ ,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(snake_case_ ,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(snake_case_ )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ = []
for partial_result in partial_results:
records.extend(snake_case_ )
A_ = sorted(snake_case_ ,key=lambda __UpperCamelCase : x["id"] )
A_ = [x["pred"] for x in records]
return preds
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = time.time()
logger.info("waiting for all nodes to finish" )
A_ = None
while (time.time() - start_wait) < timeout:
A_ = list(save_dir.glob("rank_*.json" ) )
if len(snake_case_ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
A_ = lmap(snake_case_ ,snake_case_ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A ( self : Optional[int] ):
A_ , A_ = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
A_ = "A painting of a squirrel eating a burger"
A_ = jax.device_count()
A_ = num_samples * [prompt]
A_ = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ = replicate(__lowerCAmelCase )
A_ = shard(__lowerCAmelCase )
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(__lowerCAmelCase , jax.device_count() )
A_ = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ = images[0, 253:256, 253:256, -1]
A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __A ( self : str ):
A_ = "stabilityai/stable-diffusion-2"
A_ , A_ = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" )
A_ , A_ = FlaxStableDiffusionPipeline.from_pretrained(
__lowerCAmelCase , scheduler=__lowerCAmelCase , revision="bf16" , dtype=jnp.bfloataa , )
A_ = scheduler_params
A_ = "A painting of a squirrel eating a burger"
A_ = jax.device_count()
A_ = num_samples * [prompt]
A_ = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ = replicate(__lowerCAmelCase )
A_ = shard(__lowerCAmelCase )
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(__lowerCAmelCase , jax.device_count() )
A_ = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ = images[0, 253:256, 253:256, -1]
A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ):
A_ = 'laion/clap-htsat-unfused'
A_ = tempfile.mkdtemp()
def __A ( self : Optional[int] , **UpperCAmelCase : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **_A )
def __A ( self : Tuple , **UpperCAmelCase : List[str] ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_A )
def __A ( self : str ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Optional[int] ):
A_ = self.get_tokenizer()
A_ = self.get_feature_extractor()
A_ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
processor.save_pretrained(self.tmpdirname )
A_ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def __A ( self : Dict ):
A_ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_feature_extractor(do_normalize=_A , padding_value=1.0 )
A_ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def __A ( self : str ):
A_ = self.get_feature_extractor()
A_ = self.get_tokenizer()
A_ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
A_ = floats_list((3, 1000) )
A_ = feature_extractor(_A , return_tensors="np" )
A_ = processor(audios=_A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : Optional[int] ):
A_ = self.get_feature_extractor()
A_ = self.get_tokenizer()
A_ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
A_ = 'This is a test string'
A_ = processor(text=_A )
A_ = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Any ):
A_ = self.get_feature_extractor()
A_ = self.get_tokenizer()
A_ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(_A )
A_ = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def __A ( self : Optional[Any] ):
A_ = self.get_feature_extractor()
A_ = self.get_tokenizer()
A_ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Union[str, Any] = logging.get_logger(__name__)
__a :Optional[int] = {"vocab_file": "vocab.txt"}
__a :Any = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
__a :List[str] = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
__a :Optional[Any] = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class _a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[Any] = ConvBertTokenizer
def __init__( self : int , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str="[UNK]" , UpperCAmelCase : Tuple="[SEP]" , UpperCAmelCase : Optional[int]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Any="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__a :Union[str, Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__a :Optional[int] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__a :int = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
A_ = 0.0
for i, j in zip(snake_case_ , snake_case_ ):
n_correct += 1.0 if math_equivalence.is_equiv(snake_case_ , snake_case_ ) else 0.0
A_ = n_correct / len(snake_case_ )
return {
"accuracy": accuracy,
}
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (DDIMParallelScheduler,)
_lowerCamelCase : Any = (("eta", 0.0), ("num_inference_steps", 5_0))
def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ):
A_ = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**UpperCAmelCase )
return config
def __A ( self : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCAmelCase )
A_ = scheduler_class(**UpperCAmelCase )
A_ , A_ = 10, 0.0
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for t in scheduler.timesteps:
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def __A ( self : str ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase )
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(steps_offset=1 )
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __A ( self : Union[str, Any] ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : List[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase )
def __A ( self : int ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase )
def __A ( self : int ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def __A ( self : Union[str, Any] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase )
def __A ( self : Tuple ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=UpperCAmelCase , num_inference_steps=UpperCAmelCase )
def __A ( self : Dict ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCAmelCase , eta=UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def __A ( self : str ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ , A_ = 10, 0.0
scheduler.set_timesteps(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase )
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCAmelCase )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def __A ( self : Union[str, Any] ):
A_ = self.full_loop()
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def __A ( self : Optional[Any] ):
A_ = self.full_loop(prediction_type="v_prediction" )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def __A ( self : Dict ):
A_ = self.full_loop(set_alpha_to_one=UpperCAmelCase , beta_start=0.01 )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def __A ( self : Optional[int] ):
A_ = self.full_loop(set_alpha_to_one=UpperCAmelCase , beta_start=0.01 )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 329 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__a :Tuple = get_logger(__name__)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : List[Any]=0 ):
"""simple docstring"""
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
with FSDP.state_dict_type(
__UpperCamelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
A_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A_ = os.path.join(__UpperCamelCase ,f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
logger.info(f'''Saving model to {ckpt_dir}''' )
A_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=__UpperCamelCase ,storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Model saved to {ckpt_dir}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCamelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__UpperCamelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
A_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Loading model from {input_model_file}''' )
A_ = torch.load(__UpperCamelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Loading model from {input_model_file}''' )
A_ = torch.load(__UpperCamelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A_ = (
os.path.join(__UpperCamelCase ,f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
A_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=__UpperCamelCase ,storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) ,planner=DefaultLoadPlanner() ,)
A_ = state_dict["model"]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=0 ):
"""simple docstring"""
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
with FSDP.state_dict_type(
__UpperCamelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
A_ = FSDP.optim_state_dict(__UpperCamelCase ,__UpperCamelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
A_ = os.path.join(__UpperCamelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} ,storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any]=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCamelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A_ = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
A_ = torch.load(__UpperCamelCase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
A_ = (
os.path.join(__UpperCamelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
A_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() ,optimizer_key="optimizer" ,storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) ,)
A_ = optim_state["optimizer"]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
A_ = FSDP.optim_state_dict_to_load(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
optimizer.load_state_dict(__UpperCamelCase )
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ):
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__a :List[Any] = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
inspect_dataset(__UpperCamelCase ,__UpperCamelCase )
A_ = path + ".py"
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Dict ):
"""simple docstring"""
inspect_metric(__UpperCamelCase ,__UpperCamelCase )
A_ = path + ".py"
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase )
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = get_dataset_config_names(__UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = get_dataset_infos(__UpperCamelCase )
assert list(infos.keys() ) == expected_configs
A_ = expected_configs[0]
assert expected_config in infos
A_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = get_dataset_infos(__UpperCamelCase )
assert expected_config in infos
A_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
get_dataset_split_names(__UpperCamelCase ,config_name=__UpperCamelCase )
| 355 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : str ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int=True ):
"""simple docstring"""
model.train()
A_ = model(lowerCamelCase_ )
A_ = F.mse_loss(lowerCamelCase_ ,target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase_ )
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(lowerCamelCase_ )
A_ = RegressionDataset(length=80 )
A_ = DataLoader(lowerCamelCase_ ,batch_size=16 )
model.to(accelerator.device )
if sched:
A_ = AdamW(params=model.parameters() ,lr=1E-3 )
A_ = AdamW(params=ddp_model.parameters() ,lr=1E-3 )
A_ = LambdaLR(lowerCamelCase_ ,lr_lambda=lambda __UpperCamelCase : epoch**0.65 )
A_ = LambdaLR(lowerCamelCase_ ,lr_lambda=lambda __UpperCamelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
A_ = accelerator.prepare(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
else:
A_ = accelerator.prepare(lowerCamelCase_ ,lowerCamelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = get_training_setup(lowerCamelCase_ )
# Use a single batch
A_ = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A_ = accelerator.gather((ddp_input, ddp_target) )
A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad ,ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
A_ = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = get_training_setup(lowerCamelCase_ )
# Use a single batch
A_ = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A_ = accelerator.gather((ddp_input, ddp_target) )
A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
A_ = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
def __snake_case ( __UpperCamelCase : Dict=False ,__UpperCamelCase : Any=False ):
"""simple docstring"""
A_ = Accelerator(
split_batches=lowerCamelCase_ ,dispatch_batches=lowerCamelCase_ ,gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A_ = get_training_setup(lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
A_ = batch.values()
# Gather the distributed inputs and targs for the base model
A_ = accelerator.gather((ddp_input, ddp_target) )
A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
A_ = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
GradientState._reset_state()
def __snake_case ( __UpperCamelCase : int=False ,__UpperCamelCase : Optional[int]=False ):
"""simple docstring"""
A_ = Accelerator(
split_batches=lowerCamelCase_ ,dispatch_batches=lowerCamelCase_ ,gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A_ = get_training_setup(lowerCamelCase_ ,lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
A_ = batch.values()
# Gather the distributed inputs and targs for the base model
A_ = accelerator.gather((ddp_input, ddp_target) )
A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
A_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator()
A_ = RegressionDataset(length=80 )
A_ = DataLoader(lowerCamelCase_ ,batch_size=16 )
A_ = RegressionDataset(length=96 )
A_ = DataLoader(lowerCamelCase_ ,batch_size=16 )
A_ = accelerator.prepare(lowerCamelCase_ ,lowerCamelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if iteration < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if batch_num < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator()
A_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowerCamelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowerCamelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " ,f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' ,)
test_gradient_accumulation(lowerCamelCase_ ,lowerCamelCase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" ,"2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,"`split_batches=False`, `dispatch_batches=False`**" ,)
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' ,)
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase_ ,lowerCamelCase_ )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329 | 0 |
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = len(UpperCAmelCase_ )
while cur > 1:
# Find the maximum number in arr
A_ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
A_ = arr[mi::-1] + arr[mi + 1 : len(UpperCAmelCase_ )]
# Reverse whole list
A_ = arr[cur - 1 :: -1] + arr[cur : len(UpperCAmelCase_ )]
cur -= 1
return arr
if __name__ == "__main__":
__a :Tuple = input('Enter numbers separated by a comma:\n').strip()
__a :Any = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Any ):
A_ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
A_ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler("sample_euler" )
A_ = 'A painting of a squirrel eating a burger'
A_ = torch.manual_seed(0 )
A_ = sd_pipe([prompt] , generator=__snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Optional[Any] ):
A_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
A_ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler("sample_euler" )
A_ = 'A painting of a squirrel eating a burger'
A_ = torch.manual_seed(0 )
A_ = sd_pipe([prompt] , generator=__snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __A ( self : str ):
A_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
A_ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
A_ = 'A painting of a squirrel eating a burger'
A_ = torch.manual_seed(0 )
A_ = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=__snake_case , )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__a :List[str] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__a :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__a :int = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __A ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[Any] = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=A__ , hypotheses=A__ , min_len=A__ , max_len=A__ )
}
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : List[str] ):
A_ = fn
def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
return self.fn(*UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( self : Dict , UpperCAmelCase : List[str] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
A_ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A_ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A_ = max(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) ,b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( lowerCamelCase__ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (UnCLIPScheduler,)
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
A_ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__A )
return config
def __A ( self : Tuple ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __A ( self : List[Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __A ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __A ( self : Dict ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __A ( self : int ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __A ( self : int ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __A ( self : int ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(variance_type="fixed_small_log" )
A_ = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def __A ( self : int ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(variance_type="learned_range" )
A_ = scheduler_class(**__A )
A_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_010_011 < 1E-5
def __A ( self : List[str] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__A )
A_ = scheduler.timesteps
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
A_ = model(__A , __A )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(__A ) )
A_ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__A )
scheduler.set_timesteps(25 )
A_ = scheduler.timesteps
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
A_ = model(__A , __A )
if i + 1 == timesteps.shape[0]:
A_ = None
else:
A_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(__A ) )
A_ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def __A ( self : List[str] ):
pass
def __A ( self : List[Any] ):
pass
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import re
import subprocess
import sys
__a :int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__a :Tuple = (
subprocess.check_output(F"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('utf-8').split()
)
__a :int = '|'.join(sys.argv[1:])
__a :List[Any] = re.compile(RF"^({joined_dirs}).*?\.py$")
__a :Optional[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__a :Any = logging.getLogger(__name__)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Dict = None ,__UpperCamelCase : Tuple = None ,__UpperCamelCase : str = None ,__UpperCamelCase : Optional[Any] = None ,__UpperCamelCase : Optional[Any] = None ,__UpperCamelCase : Optional[int] = False ,):
"""simple docstring"""
A_ = bnb_quantization_config.load_in_abit
A_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
A_ = []
# custom device map
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) and len(device_map.keys() ) > 1:
A_ = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A_ = get_keys_to_not_convert(UpperCAmelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCAmelCase__ )
A_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A_ = []
A_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCAmelCase__ )
# compatibility with peft
A_ = load_in_abit
A_ = load_in_abit
A_ = get_parameter_device(UpperCAmelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
A_ = replace_with_bnb_layers(UpperCAmelCase__ ,UpperCAmelCase__ ,modules_to_not_convert=UpperCAmelCase__ )
# convert param to the right dtype
A_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A_ = name.replace(".weight" ,"" ).replace(".bias" ,"" )
A_ = getattr(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCAmelCase__ ):
param.to(UpperCAmelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A_ = replace_with_bnb_layers(
UpperCAmelCase__ ,UpperCAmelCase__ ,modules_to_not_convert=UpperCAmelCase__ )
A_ = get_quantized_model_device_map(
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,max_memory=UpperCAmelCase__ ,no_split_module_classes=UpperCAmelCase__ ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A_ = True
A_ = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=UpperCAmelCase__ ,offload_state_dict=UpperCAmelCase__ ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(UpperCAmelCase__ ,device_map=UpperCAmelCase__ ,offload_dir=UpperCAmelCase__ )
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : int=None ,__UpperCamelCase : Optional[Any]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A_ = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
A_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A_ = {}
A_ = special_dtypes
A_ = no_split_module_classes
A_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A_ = get_balanced_memory(
UpperCAmelCase__ ,low_zero=(device_map == "balanced_low_0") ,max_memory=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
A_ = max_memory
A_ = infer_auto_device_map(UpperCAmelCase__ ,**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
# check if don't have any quantized module on the cpu
A_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict=None ,__UpperCamelCase : Any=None ):
"""simple docstring"""
if modules_to_not_convert is None:
A_ = []
A_ , A_ = _replace_with_bnb_layers(
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : str=None ,__UpperCamelCase : Tuple=None ,):
"""simple docstring"""
A_ = False
for name, module in model.named_children():
if current_key_name is None:
A_ = []
current_key_name.append(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A_ = ".".join(UpperCAmelCase__ )
A_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A_ = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=UpperCAmelCase__ ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
A_ = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
A_ = module.weight.data
if module.bias is not None:
A_ = module.bias.data
bnb_module.requires_grad_(UpperCAmelCase__ )
setattr(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
A_ = True
if len(list(module.children() ) ) > 0:
A_ , A_ = _replace_with_bnb_layers(
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
A_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
with init_empty_weights():
A_ = deepcopy(UpperCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A_ = find_tied_parameters(UpperCAmelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
A_ = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
A_ = sum(UpperCAmelCase__ ,[] )
A_ = len(UpperCAmelCase__ ) > 0
# Check if it is a base model
A_ = False
if hasattr(UpperCAmelCase__ ,"base_model_prefix" ):
A_ = not hasattr(UpperCAmelCase__ ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ = list(model.named_children() )
A_ = [list_modules[-1][0]]
# add last module together with tied weights
A_ = set(UpperCAmelCase__ ) - set(UpperCAmelCase__ )
A_ = list(set(UpperCAmelCase__ ) ) + list(UpperCAmelCase__ )
# remove ".weight" from the keys
A_ = [".weight", ".bias"]
A_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ = name.replace(UpperCAmelCase__ ,"" )
filtered_module_names.append(UpperCAmelCase__ )
return filtered_module_names
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
for m in model.modules():
if isinstance(UpperCAmelCase__ ,bnb.nn.Linearabit ):
return True
return False
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return next(parameter.parameters() ).device
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Any ,__UpperCamelCase : Dict ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCAmelCase__ ,UpperCAmelCase__ ,0 ,dtype=UpperCAmelCase__ ,value=UpperCAmelCase__ )
A_ = param_name
A_ = model
if "." in tensor_name:
A_ = tensor_name.split("." )
for split in splits[:-1]:
A_ = getattr(UpperCAmelCase__ ,UpperCAmelCase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A_ = new_module
A_ = splits[-1]
# offload weights
A_ = False
offload_weight(module._parameters[tensor_name] ,UpperCAmelCase__ ,UpperCAmelCase__ ,index=UpperCAmelCase__ )
if hasattr(module._parameters[tensor_name] ,"SCB" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("weight" ,"SCB" ) ,UpperCAmelCase__ ,index=UpperCAmelCase__ ,)
else:
offload_weight(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,index=UpperCAmelCase__ )
offload_weight(UpperCAmelCase__ ,param_name.replace("weight" ,"SCB" ) ,UpperCAmelCase__ ,index=UpperCAmelCase__ )
set_module_tensor_to_device(UpperCAmelCase__ ,UpperCAmelCase__ ,"meta" ,dtype=UpperCAmelCase__ ,value=torch.empty(*param.size() ) )
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
__a :Optional[Any] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ):
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(snake_case__ ,snake_case__ ) ) )
def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
A_ = (
'Wrong input data\'s dimensions... '
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
A_ = (
'Wrong input data\'s shape... '
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
A_ = (
'Input data have different datatype... '
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(snake_case__ )
A_ = []
for value in value_array:
A_ = euclidean(snake_case__ ,dataset[0] )
A_ = dataset[0].tolist()
for dataset_value in dataset[1:]:
A_ = euclidean(snake_case__ ,snake_case__ )
if dist > temp_dist:
A_ = temp_dist
A_ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ):
"""simple docstring"""
return np.dot(snake_case__ ,snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a :Dict = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int]=0.999 ,__UpperCamelCase : Dict="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase : Dict = 2
@register_to_config
def __init__( self : Union[str, Any] , UpperCAmelCase : int = 1000 , UpperCAmelCase : Optional[int] = 0.00_085 , UpperCAmelCase : str = 0.012 , UpperCAmelCase : Any = "linear" , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : Any = "epsilon" , UpperCAmelCase : int = "linspace" , UpperCAmelCase : List[str] = 0 , ):
if trained_betas is not None:
A_ = torch.tensor(__lowercase , dtype=torch.floataa )
elif beta_schedule == "linear":
A_ = torch.linspace(__lowercase , __lowercase , __lowercase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowercase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ = betas_for_alpha_bar(__lowercase )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__lowercase , __lowercase , __lowercase )
def __A ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=None ):
if schedule_timesteps is None:
A_ = self.timesteps
A_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A_ = 1 if len(__lowercase ) > 1 else 0
else:
A_ = timestep.cpu().item() if torch.is_tensor(__lowercase ) else timestep
A_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __A ( self : Optional[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , ):
A_ = self.index_for_timestep(__lowercase )
if self.state_in_first_order:
A_ = self.sigmas[step_index]
else:
A_ = self.sigmas_interpol[step_index]
A_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] = None , UpperCAmelCase : Tuple = None , ):
A_ = num_inference_steps
A_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A_ = np.linspace(0 , num_train_timesteps - 1 , __lowercase , dtype=__lowercase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ = (np.arange(0 , __lowercase ) * step_ratio).round()[::-1].copy().astype(__lowercase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ = (np.arange(__lowercase , 0 , -step_ratio )).round().copy().astype(__lowercase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
A_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A_ = torch.from_numpy(np.log(__lowercase ) ).to(__lowercase )
A_ = np.interp(__lowercase , np.arange(0 , len(__lowercase ) ) , __lowercase )
A_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A_ = torch.from_numpy(__lowercase ).to(device=__lowercase )
# interpolate sigmas
A_ = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
A_ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
A_ = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__lowercase ).startswith("mps" ):
# mps does not support float64
A_ = torch.from_numpy(__lowercase ).to(__lowercase , dtype=torch.floataa )
else:
A_ = torch.from_numpy(__lowercase ).to(__lowercase )
# interpolate timesteps
A_ = self.sigma_to_t(__lowercase ).to(__lowercase , dtype=timesteps.dtype )
A_ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
A_ = torch.cat([timesteps[:1], interleaved_timesteps] )
A_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A_ = defaultdict(__lowercase )
def __A ( self : Union[str, Any] , UpperCAmelCase : Dict ):
# get log sigma
A_ = sigma.log()
# get distribution
A_ = log_sigma - self.log_sigmas[:, None]
# get sigmas range
A_ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
A_ = low_idx + 1
A_ = self.log_sigmas[low_idx]
A_ = self.log_sigmas[high_idx]
# interpolate sigmas
A_ = (low - log_sigma) / (low - high)
A_ = w.clamp(0 , 1 )
# transform interpolation to time range
A_ = (1 - w) * low_idx + w * high_idx
A_ = t.view(sigma.shape )
return t
@property
def __A ( self : Dict ):
return self.sample is None
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str = True , ):
A_ = self.index_for_timestep(__lowercase )
# advance index counter by 1
A_ = timestep.cpu().item() if torch.is_tensor(__lowercase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A_ = self.sigmas[step_index]
A_ = self.sigmas_interpol[step_index + 1]
A_ = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
A_ = self.sigmas[step_index - 1]
A_ = self.sigmas_interpol[step_index]
A_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A_ = 0
A_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A_ = sigma_hat if self.state_in_first_order else sigma_interpol
A_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A_ = sigma_hat if self.state_in_first_order else sigma_interpol
A_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A_ = sigma_interpol - sigma_hat
# store for 2nd order step
A_ = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
A_ = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
A_ = sigma_next - sigma_hat
A_ = self.sample
A_ = None
A_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowercase ):
# mps does not support float64
A_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
A_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
A_ = self.timesteps.to(original_samples.device )
A_ = timesteps.to(original_samples.device )
A_ = [self.index_for_timestep(__lowercase , __lowercase ) for t in timesteps]
A_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A_ = sigma.unsqueeze(-1 )
A_ = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ):
return self.config.num_train_timesteps
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
__a :List[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = [False] * len(snake_case_ )
A_ = [s]
A_ = True
while queue:
A_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case_ )
A_ = True
A_ = u
return visited[t]
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = [-1] * (len(snake_case_ ))
A_ = 0
A_ = []
A_ = [i[:] for i in graph] # Record original cut, copy.
while bfs(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ):
A_ = float("Inf" )
A_ = sink
while s != source:
# Find the minimum value in select path
A_ = min(snake_case_ ,graph[parent[s]][s] )
A_ = parent[s]
max_flow += path_flow
A_ = sink
while v != source:
A_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
A_ = parent[v]
for i in range(len(snake_case_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
return "".join(sorted(lowerCAmelCase_ ) )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
return word_by_signature[signature(lowerCAmelCase_ )]
__a :Union[str, Any] = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a :List[str] = sorted({word.strip().lower() for word in data.splitlines()})
__a :Optional[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__a :List[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 329 | 0 |
from __future__ import annotations
import math
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = str(__SCREAMING_SNAKE_CASE )
A_ = [n]
for i in range(1 ,len(__SCREAMING_SNAKE_CASE ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
if len(str(__SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(__SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(__SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def __snake_case ( __UpperCamelCase : Union[str, Any] = 11 ):
"""simple docstring"""
A_ = []
A_ = 13
while len(__SCREAMING_SNAKE_CASE ) != count:
if validate(__SCREAMING_SNAKE_CASE ):
A_ = list_truncated_nums(__SCREAMING_SNAKE_CASE )
if all(is_prime(__SCREAMING_SNAKE_CASE ) for i in list_nums ):
list_truncated_primes.append(__SCREAMING_SNAKE_CASE )
num += 2
return list_truncated_primes
def __snake_case ( ):
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(11)) = }")
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import os
import sys
import unittest
__a :str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__a :Optional[int] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
__a :List[str] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = get_test_to_tester_mapping(__snake_case )
A_ = get_test_to_tester_mapping(__snake_case )
A_ = {"BertModelTest": "BertModelTester"}
A_ = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
def __A ( self : int ):
A_ = get_model_to_test_mapping(__snake_case )
A_ = get_model_to_test_mapping(__snake_case )
A_ = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
A_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
def __A ( self : List[Any] ):
A_ = get_model_to_tester_mapping(__snake_case )
A_ = get_model_to_tester_mapping(__snake_case )
A_ = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
A_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
self.assertEqual(get_test_info.to_json(__snake_case ) , __snake_case )
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__a :Any = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__a :List[Any] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__a :Optional[Any] = 'zero2'
__a :Union[str, Any] = 'zero3'
__a :Tuple = [ZEROa, ZEROa]
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = parameterized.to_safe_name("_".join(str(__UpperCamelCase ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
__a :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _a ( snake_case_ ):
"""simple docstring"""
@parameterized.expand(UpperCAmelCase , name_func=UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : int ):
self.run_and_check(
stage=UpperCAmelCase , model=UpperCAmelCase , distributed=UpperCAmelCase , fpaa=UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase , name_func=UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int ):
self.run_and_check(
stage=UpperCAmelCase , model=UpperCAmelCase , distributed=UpperCAmelCase , fpaa=UpperCAmelCase , )
@parameterized.expand(UpperCAmelCase , name_func=UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
self.run_and_check(
stage=UpperCAmelCase , model=UpperCAmelCase , distributed=UpperCAmelCase , fpaa=UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase , name_func=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] ):
self.run_and_check(
stage=UpperCAmelCase , model=UpperCAmelCase , distributed=UpperCAmelCase , fpaa=UpperCAmelCase , )
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int = 10 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , ):
A_ = models[model]
A_ = self.run_trainer(
stage=UpperCAmelCase , model_name=UpperCAmelCase , eval_steps=UpperCAmelCase , num_train_epochs=1 , distributed=UpperCAmelCase , fpaa=UpperCAmelCase , )
self.do_checks(UpperCAmelCase )
return output_dir
def __A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int = 10 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , ):
A_ = self.get_auto_remove_tmp_dir("./xxx" , after=UpperCAmelCase )
A_ = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A_ = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
A_ = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
A_ = self.get_launcher(UpperCAmelCase )
A_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
return output_dir
def __A ( self : int , UpperCAmelCase : Optional[Any]=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
A_ = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
__a :str = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__a :Dict = {value: key for key, value in MORSE_CODE_DICT.items()}
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def __snake_case ( ):
"""simple docstring"""
A_ = "Morse code here!"
print(__UpperCamelCase )
A_ = encrypt(__UpperCamelCase )
print(__UpperCamelCase )
A_ = decrypt(__UpperCamelCase )
print(__UpperCamelCase )
if __name__ == "__main__":
main()
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 329 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __snake_case ( ):
"""simple docstring"""
print("Making key files..." )
make_key_files("rsa" ,1024 )
print("Key files generation successful." )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
print("Generating prime p..." )
A_ = rabinMiller.generate_large_prime(__UpperCamelCase )
print("Generating prime q..." )
A_ = rabinMiller.generate_large_prime(__UpperCamelCase )
A_ = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
A_ = random.randrange(2 ** (key_size - 1) ,2 ** (key_size) )
if cryptoMath.gcd(__UpperCamelCase ,(p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
A_ = cryptoMath.find_mod_inverse(__UpperCamelCase ,(p - 1) * (q - 1) )
A_ = (n, e)
A_ = (n, d)
return (public_key, private_key)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int ):
"""simple docstring"""
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print("\nWARNING:" )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"Use a different name or delete these files and re-run this program." )
sys.exit()
A_ , A_ = generate_key(__UpperCamelCase )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' ,"w" ) as out_file:
out_file.write(f'''{key_size},{public_key[0]},{public_key[1]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' ,"w" ) as out_file:
out_file.write(f'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ):
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : PreTrainedTokenizer ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] = None ,):
"""simple docstring"""
A_ = {}
if train_file is not None:
A_ = [train_file]
if eval_file is not None:
A_ = [eval_file]
if test_file is not None:
A_ = [test_file]
A_ = datasets.load_dataset("csv" ,data_files=__UpperCamelCase )
A_ = list(ds[list(files.keys() )[0]].features.keys() )
A_ = features_name.pop(__UpperCamelCase )
A_ = list(set(ds[list(files.keys() )[0]][label_name] ) )
A_ = {label: i for i, label in enumerate(__UpperCamelCase )}
A_ = tokenizer.model_input_names
A_ = {}
if len(__UpperCamelCase ) == 1:
for k in files.keys():
A_ = ds[k].map(
lambda __UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ,padding="max_length" ) ,batched=__UpperCamelCase ,)
elif len(__UpperCamelCase ) == 2:
for k in files.keys():
A_ = ds[k].map(
lambda __UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ,padding="max_length" ,) ,batched=__UpperCamelCase ,)
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A_ = {k: v for k, v in ex.items() if k in input_names}
A_ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A_ = {k: v for k, v in ex.items() if k in input_names}
A_ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A_ = {k: v for k, v in ex.items() if k in input_names}
A_ = labelaid[ex[label_name]]
yield (d, label)
A_ = (
tf.data.Dataset.from_generator(
__UpperCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A_ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A_ = (
tf.data.Dataset.from_generator(
__UpperCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A_ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A_ = (
tf.data.Dataset.from_generator(
__UpperCamelCase ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A_ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__a :Dict = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int = field(metadata={'help': 'Which column contains the label'} )
_lowerCamelCase : str = field(default=snake_case_ , metadata={'help': 'The path of the training file'} )
_lowerCamelCase : Optional[str] = field(default=snake_case_ , metadata={'help': 'The path of the development file'} )
_lowerCamelCase : Optional[str] = field(default=snake_case_ , metadata={'help': 'The path of the test file'} )
_lowerCamelCase : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowerCamelCase : bool = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCamelCase : bool = field(default=snake_case_ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def __snake_case ( ):
"""simple docstring"""
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A_ , A_ , A_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
A_ , A_ , A_ , A_ = get_tfds(
train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=__UpperCamelCase ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
A_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(__UpperCamelCase ) ,labelaid=__UpperCamelCase ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="text-classification" ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
A_ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool(".bin" in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,)
def compute_metrics(__UpperCamelCase : EvalPrediction ) -> Dict:
A_ = np.argmax(p.predictions ,axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A_ = TFTrainer(
model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,compute_metrics=__UpperCamelCase ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A_ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A_ = trainer.evaluate()
A_ = os.path.join(training_args.output_dir ,"eval_results.txt" )
with open(__UpperCamelCase ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(__UpperCamelCase )
return results
if __name__ == "__main__":
main()
| 355 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329 | 0 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _a :
"""simple docstring"""
def __A ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ):
return None
class _a :
"""simple docstring"""
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] ):
return None
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __A ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase )
@require_torch
@slow
def __A ( self : int ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase )
@require_torch
@slow
def __A ( self : Optional[int] ):
from transformers import BertModel
A_ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(UpperCAmelCase ) )
vocab_file.flush()
A_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A_ = BertModel(BertConfig(vocab_size=len(UpperCAmelCase ) ) )
model.save_pretrained(UpperCAmelCase )
self._test_export(UpperCAmelCase , "pt" , 12 , UpperCAmelCase )
@require_tf
@slow
def __A ( self : int ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ = self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase )
A_ = quantize(Path(UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def __A ( self : Optional[int] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ = self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase )
A_ = quantize(UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def __A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=None , **UpperCAmelCase : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
A_ = Path(UpperCAmelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
return path
except Exception as e:
self.fail(UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def __A ( self : Tuple ):
from transformers import BertModel
A_ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
A_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase , UpperCAmelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def __A ( self : Optional[Any] ):
from transformers import TFBertModel
A_ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
A_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase , UpperCAmelCase , "tf" )
def __A ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int ):
A_ = FeatureExtractionPipeline(UpperCAmelCase , UpperCAmelCase )
A_ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
A_ , A_ , A_ , A_ = infer_shapes(UpperCAmelCase , UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:] , UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def __A ( self : Optional[int] ):
A_ = ["input_ids", "attention_mask", "token_type_ids"]
A_ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
A_ , A_ = ensure_valid_input(FuncContiguousArgs() , UpperCAmelCase , UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCAmelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCAmelCase ) , set(UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCAmelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A_ , A_ = ensure_valid_input(FuncNonContiguousArgs() , UpperCAmelCase , UpperCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def __A ( self : Union[str, Any] ):
A_ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Any ):
# we need a list not a string, so do something to change the type
A_ = arr.split("," )
def __A ( self : Union[str, Any] ):
A_ = [int(self.array[0] )] * len(self.array )
A_ = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
A_ = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
A_ = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
__a :str = input('please input some numbers:')
__a :int = SubArray(whole_array)
__a :Any = array.solve_sub_array()
print(('the results is:', re))
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
"""simple docstring"""
A_ = []
for _ in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
"""simple docstring"""
A_ = []
for step in range(__UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
torch.save(scheduler.state_dict() ,__UpperCamelCase )
A_ = torch.load(__UpperCamelCase )
scheduler.load_state_dict(__UpperCamelCase )
return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class _a :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : List[str] ):
A_ = fn
def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
return self.fn(*UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( self : Dict , UpperCAmelCase : List[str] ):
A_ = list(map(self , scheduler.lr_lambdas ) )
| 329 | 0 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : list[int] ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ = i + 1
else:
A_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 11, 15], 9) = }")
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : str ):
"""simple docstring"""
A_ , A_ = set(__UpperCamelCase ), [start]
while stack:
A_ = stack.pop()
explored.add(__UpperCamelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__UpperCamelCase )
return explored
__a :int = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__a :List[Any] = pytest.mark.integration
__a :str = {'comet'}
__a :Any = importlib.util.find_spec('fairseq') is not None
__a :Union[str, Any] = {'code_eval'}
__a :Optional[Any] = os.name == 'nt'
__a :str = {'bertscore', 'frugalscore', 'perplexity'}
__a :Tuple = importlib.util.find_spec('transformers') is not None
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self : Any ,__UpperCamelCase : Tuple ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self : Tuple ,__UpperCamelCase : List[str] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self : Optional[int] ,__UpperCamelCase : str ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def __snake_case ( ):
"""simple docstring"""
A_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
snake_case_ , snake_case_ , snake_case_ )
@local
class _a ( parameterized.TestCase ):
_lowerCamelCase : str = {}
_lowerCamelCase : str = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __A ( self : Any , UpperCAmelCase : Optional[Any] ):
A_ = "[...]"
A_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , UpperCAmelCase ) ).module_path )
A_ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCAmelCase )
# check parameters
A_ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCAmelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
A_ = doctest.testmod(UpperCAmelCase , verbose=UpperCAmelCase , raise_on_error=UpperCAmelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
A_ = "[...]"
A_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , UpperCAmelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
A_ = doctest.testmod(UpperCAmelCase , verbose=UpperCAmelCase , raise_on_error=UpperCAmelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase ):
yield
else:
yield
@contextmanager
def __A ( self : List[Any] ):
def load_local_metric(UpperCAmelCase : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ):
return load_metric(os.path.join("metrics" , UpperCAmelCase ) , *UpperCAmelCase , **UpperCAmelCase )
with patch("datasets.load_metric" ) as mock_load_metric:
A_ = load_local_metric
yield
@classmethod
def __A ( cls : List[Any] , UpperCAmelCase : str ):
def wrapper(UpperCAmelCase : Optional[Any] ):
A_ = contextmanager(UpperCAmelCase )
A_ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags
class _a ( snake_case_ ):
def __A ( self : str , UpperCAmelCase : Union[str, Any] ):
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
A_ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
import torch
def bert_cos_score_idf(__UpperCamelCase : List[str] ,__UpperCamelCase : int ,*__UpperCamelCase : Tuple ,**__UpperCamelCase : Tuple ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
A_ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
def load_from_checkpoint(__UpperCamelCase : int ):
class _a :
def __A ( self : Any , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
assert len(UpperCAmelCase ) == 2
A_ = [0.19, 0.92]
return scores, sum(UpperCAmelCase ) / len(UpperCAmelCase )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
A_ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
A_ = load_from_checkpoint
yield
def __snake_case ( ):
"""simple docstring"""
A_ = load_metric(os.path.join("metrics" ,"seqeval" ) )
A_ = "ERROR"
A_ = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
import os
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = len(grid[0] )
A_ = len(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__UpperCamelCase ):
for j in range(n_rows - 3 ):
A_ = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
A_ = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
A_ = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
A_ = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
A_ = max(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if max_product > largest:
A_ = max_product
return largest
def __snake_case ( ):
"""simple docstring"""
A_ = []
with open(os.path.dirname(__UpperCamelCase ) + "/grid.txt" ) as file:
for line in file:
grid.append(line.strip("\n" ).split(" " ) )
A_ = [[int(__UpperCamelCase ) for i in grid[j]] for j in range(len(__UpperCamelCase ) )]
return largest_product(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
from heapq import heappop, heappush
import numpy as np
def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : bool ,):
"""simple docstring"""
A_ , A_ = grid.shape
A_ = [-1, 1, 0, 0]
A_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
A_ , A_ = [(0, source)], set()
A_ = np.full((rows, cols) ,np.inf )
A_ = 0
A_ = np.empty((rows, cols) ,dtype=__UpperCamelCase )
A_ = None
while queue:
((A_) , (A_)) = heappop(__UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
A_ = []
while (x, y) != source:
path.append((x, y) )
A_ , A_ = predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__UpperCamelCase ) ):
A_ , A_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
A_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__UpperCamelCase ,(dist + 1, (nx, ny)) )
A_ = dist + 1
A_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Any , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = False
A_ = True
A_ = 99
A_ = 32
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = None
def __A ( self : str ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple ):
A_ = TFDistilBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask}
A_ = model(UpperCAmelCase )
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ):
A_ = TFDistilBertForMaskedLM(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ):
A_ = TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : str , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFDistilBertForSequenceClassification(UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] ):
A_ = self.num_choices
A_ = TFDistilBertForMultipleChoice(UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : str , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFDistilBertForTokenClassification(UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Tuple ):
A_ = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = config_and_inputs
A_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_lowerCamelCase : Optional[int] = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : Optional[int] ):
A_ = TFDistilBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def __A ( self : List[Any] ):
self.config_tester.run_common_tests()
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def __A ( self : Any ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def __A ( self : str ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def __A ( self : str ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : Optional[int] ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A_ = TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : List[str] ):
A_ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : List[Any]=24 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Dict=16 , UpperCAmelCase : int=2 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=1000 , ):
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = scope
A_ = range_bbox
def __A ( self : str ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ = bbox[i, j, 3]
A_ = bbox[i, j, 1]
A_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ = bbox[i, j, 2]
A_ = bbox[i, j, 0]
A_ = t
A_ = None
if self.use_input_mask:
A_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self : int ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , ):
A_ = LiltModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
A_ = model(UpperCAmelCase , bbox=UpperCAmelCase , token_type_ids=UpperCAmelCase )
A_ = model(UpperCAmelCase , bbox=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , ):
A_ = self.num_labels
A_ = LiltForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , ):
A_ = LiltForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Dict ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Union[str, Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Any = False
def __A ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ):
return True
def __A ( self : Dict ):
A_ = LiltModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __A ( self : Union[str, Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
@slow
def __A ( self : List[str] ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = LiltModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any ):
A_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(UpperCAmelCase )
A_ = torch.tensor([[1, 2]] , device=UpperCAmelCase )
A_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase )
# forward pass
with torch.no_grad():
A_ = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase )
A_ = torch.Size([1, 2, 768] )
A_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase , atol=1E-3 ) )
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int | str ):
"""simple docstring"""
A_ = str(__UpperCamelCase )
return n == n[::-1]
def __snake_case ( __UpperCamelCase : int = 100_0000 ):
"""simple docstring"""
A_ = 0
for i in range(1 ,__UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.