"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item at `self._loader_batch_index` within the current batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator being None means we haven't started a `preprocess` iterator yet, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`, and
        # we need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch, so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
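
    # Sanity check (added illustration, not in the original script): on a shared
    # universe, fuzzy_or/fuzzy_and reduce to the elementwise max/min of the two
    # membership arrays.
    #   assert np.allclose(union, np.maximum(young, middle_aged))
    #   assert np.allclose(intersection, np.minimum(young, middle_aged))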
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph where each possible edge appears with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than `probability`
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}
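

# Usage sketch (added illustration; the values are arbitrary): seed the
# module-level RNG for reproducibility, then sample an undirected 4-vertex
# graph where each possible edge appears with probability 0.5.
#
#   random.seed(1)
#   adjacency = random_graph(4, 0.5)
#   print(adjacency)  # dict mapping each vertex to its neighbor list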
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
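

# Usage sketch (added for context; assumes a live SparkSession named `spark`).
# This builder backs the public `datasets.Dataset.from_spark` entry point,
# which materializes a Spark DataFrame into an Arrow-backed dataset:
#
#   df = spark.createDataFrame([("hello",), ("world",)], "text: string")
#   ds = datasets.Dataset.from_spark(df)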
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.

    Built-in in functools from Python 3.8.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """
    Convert a string representation of truth to `True` (1) or `False` (0).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; false values are `n`, `no`, `f`, `false`, `off`, and `0`.
    Raises ValueError if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
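

# Example (added illustration): `to_py_obj` recursively converts framework
# tensors and numpy scalars inside nested containers into plain Python values.
#   to_py_obj({"a": np.array([1, 2]), "b": [np.float32(0.5)]})
#   # -> {"a": [1, 2], "b": [0.5]}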
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
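

# Example (added illustration): enter a dynamic list of context managers as a
# single unit; every manager is closed when the block exits.
#   import tempfile
#   with ContextManagers([tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()]):
#       ...  # both temporary directories exist here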
def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
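

# Example (added illustration): nested keys are joined with the delimiter.
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}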
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
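

# Sketch of ModelOutput semantics (added illustration; `SampleOutput` is a
# hypothetical subclass): set fields are reachable by attribute, by string key,
# and by integer index, while None-valued fields are skipped by to_tuple().
#
#   from dataclasses import dataclass
#   from typing import Optional
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional[float] = None
#       logits: Optional[list] = None
#
#   out = SampleOutput(logits=[0.1, 0.9])
#   out.logits == out["logits"] == out[0]  # True; `loss` is None and dropped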
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
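# Precompute primality for every n <= 1_000_000 with a sieve of Eratosthenes so
# that the rotation checks below become O(1) lookups.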
UpperCAmelCase: str = [True] * 1_000_001
UpperCAmelCase: List[Any] = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
UpperCAmelCase: Optional[Any] = False
i += 1
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return seive[n]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return any(digit in """02468""" for digit in str(__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 1000000 ):
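    # A circular prime stays prime under every digit rotation; any even digit
    # guarantees some rotation ends in an even digit, so such candidates are
    # skipped before the (more expensive) rotation test.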
_lowercase : Any = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__UpperCAmelCase ) and not contains_an_even_digit(__UpperCAmelCase ):
_lowercase : Union[str, Any] = str(__UpperCAmelCase )
_lowercase : Optional[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__UpperCAmelCase ) )]
if all(is_prime(__UpperCAmelCase ) for i in list_nums ):
result.append(__UpperCAmelCase )
return result
def __SCREAMING_SNAKE_CASE ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
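    # Split on whitespace and rejoin in reverse, e.g. "Hello World" -> "World Hello".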
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCAmelCase: int = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
UpperCAmelCase: Dict = F'https://www.google.com/search?q={query}&num=100'
UpperCAmelCase: str = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
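    # Google serves two different result layouts: try the modern markup first,
    # then fall back to the legacy layout, whose links are wrapped in a query string.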
try:
UpperCAmelCase: List[Any] = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
UpperCAmelCase: Tuple = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 336 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
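    # Hash a module's source with comments and blank lines stripped, so that the
    # cache key only changes when the code that actually runs changes.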
_lowercase : int = []
for line in lines:
_lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments
if line:
filtered_lines.append(__UpperCAmelCase )
_lowercase : Tuple = """\n""".join(__UpperCAmelCase )
# Make a hash from all this code
_lowercase : Tuple = full_str.encode("""utf-8""" )
    return sha256(__UpperCAmelCase ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
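        # Round to the nearest multiple of `multiple`, then floor/ceil so the
        # result stays within the optional max_val/min_val bounds.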
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
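        # Post-process raw logits into per-image segmentation maps: upsample each
        # logit map to its target size, then take the per-pixel argmax over classes.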
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase: List[str] = logging.get_logger(__name__)
UpperCAmelCase: Union[str, Any] = """▁"""
UpperCAmelCase: List[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase: Optional[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
UpperCAmelCase: int = {
"""facebook/nllb-200-distilled-600M""": 1_024,
}
# fmt: off
UpperCAmelCase: Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="<unk>" ,UpperCAmelCase_="<pad>" ,UpperCAmelCase_="<mask>" ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_ = None ,UpperCAmelCase_=None ,UpperCAmelCase_=False ,**UpperCAmelCase_ ,):
# Mask token behave like a normal word, i.e. include the space before it
_lowercase : List[str] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else mask_token
_lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase : str = legacy_behaviour
super().__init__(
bos_token=UpperCAmelCase_ ,eos_token=UpperCAmelCase_ ,unk_token=UpperCAmelCase_ ,sep_token=UpperCAmelCase_ ,cls_token=UpperCAmelCase_ ,pad_token=UpperCAmelCase_ ,mask_token=UpperCAmelCase_ ,tokenizer_file=UpperCAmelCase_ ,src_lang=UpperCAmelCase_ ,tgt_lang=UpperCAmelCase_ ,additional_special_tokens=UpperCAmelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,legacy_behaviour=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_lowercase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowercase : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : Tuple = 1
_lowercase : List[Any] = len(self.sp_model )
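        # Language codes are appended after the SentencePiece vocabulary, each id
        # shifted by the fairseq offset so that ids match the original fairseq model.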
_lowercase : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
_lowercase : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
_lowercase : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowercase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowercase : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowercase : Optional[int] = src_lang if src_lang is not None else """eng_Latn"""
_lowercase : Dict = self.lang_code_to_id[self._src_lang]
_lowercase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
_lowercase : Optional[int] = self.__dict__.copy()
_lowercase : List[str] = None
_lowercase : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_lowercase : Optional[int] = {}
_lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ ( self ):
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ ,token_ids_a=UpperCAmelCase_ ,already_has_special_tokens=UpperCAmelCase_ )
_lowercase : Any = [1] * len(self.prefix_tokens )
_lowercase : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : List[Any] = [self.sep_token_id]
_lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowercase : List[Any] = src_lang
_lowercase : List[str] = self(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : int = self.convert_tokens_to_ids(UpperCAmelCase_ )
_lowercase : int = tgt_lang_id
return inputs
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.sp_model.encode(UpperCAmelCase_ ,out_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : List[Any] = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Dict = """""".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ ,""" """ ).strip()
return out_string
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : int = os.path.join(
UpperCAmelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ ,"""wb""" ) as fi:
_lowercase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "eng_Latn" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "fra_Latn" ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = src_lang
_lowercase : List[str] = tgt_lang
        return super().prepare_seq2seq_batch(UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Dict = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowercase : Optional[Any] = []
_lowercase : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : Optional[Any] = [self.cur_lang_code]
_lowercase : int = [self.eos_token_id]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Dict = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowercase : Optional[int] = []
_lowercase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : int = [self.cur_lang_code]
_lowercase : List[str] = [self.eos_token_id]
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
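    # Binary search for the index of the first negative value in a row sorted in
    # non-increasing order; every element from that index onwards is negative.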
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class UpperCamelCase ( ctypes.Structure ):
"""simple docstring"""
# _fields is a specific attr expected by ctypes
SCREAMING_SNAKE_CASE_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : Dict = CursorInfo()
        _lowercase : Union[str, Any] = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
        _lowercase : Any = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
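        # ANSI escape sequence (DECTCEM): "\033[?25l" hides the terminal cursor;
        # show_cursor below writes "\033[?25h" to make it visible again.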
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : Optional[int] = CursorInfo()
        _lowercase : List[Any] = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
        _lowercase : Any = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def __SCREAMING_SNAKE_CASE ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    # `re.sub` returns a new string rather than modifying in place, so keep the result.
    __UpperCAmelCase = re.sub("""<n>""" , """""" , __UpperCAmelCase )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
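    # Discretize a cumulative noise schedule alpha_bar(t) into per-step betas via
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), each capped by the second
    # argument (0.999 by default).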
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
    return torch.tensor(__UpperCAmelCase , dtype=torch.float32 )
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
            _lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.float32 )
elif beta_schedule == "linear":
            _lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
        _lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
            _lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.float32 )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
            _lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            _lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.float32 )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 336 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 336 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
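    # Knuth-Morris-Pratt prefix function: prefix_result[i] holds the length of
    # the longest proper prefix of input_string[: i + 1] that is also its suffix.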
_lowercase : Dict = [0] * len(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
# use last results for better performance - dynamic programming
_lowercase : List[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase : str = j
return prefix_result
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return max(prefix_function(__UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
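    # Worked example (hypothetical input): prefix_function("aabaaab") yields
    # [0, 1, 0, 1, 2, 2, 3]; its maximum, 3, is the longest border "aab".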
| 336 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
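# A worked round trip for the functions above, using the names visible at the
# call sites (all_rotations, bwt_transform, reverse_bwt):
#
#     >>> result = bwt_transform("^BANANA|")
#     >>> result["bwt_string"]
#     'BNN^AA|A'
#     >>> result["idx_original_string"]
#     6
#     >>> reverse_bwt(result["bwt_string"], result["idx_original_string"])
#     '^BANANA|'
#
# Grouping equal characters together ("NN", "AA") is what makes the transform a
# useful preprocessing step for run-length or move-to-front compression.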
| 336 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase: str = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[Any] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
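# The _LazyModule registration above defers the heavy torch/TF imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea with
# PEP 562 module-level __getattr__ (a hypothetical package, not transformers'
# actual _LazyModule implementation):
#
#     # my_pkg/__init__.py
#     import importlib
#
#     _import_structure = {"modeling": ["Model"], "tokenization": ["Tokenizer"]}
#     _attr_to_module = {
#         attr: mod for mod, attrs in _import_structure.items() for attr in attrs
#     }
#
#     def __getattr__(name):
#         # import the submodule only when one of its exports is requested
#         if name in _attr_to_module:
#             module = importlib.import_module("." + _attr_to_module[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")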
| 336 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
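# A readable sketch of the optimized two-pointer search timed above; the name
# triplet_sum2 is restored from the timeit statement strings.
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Return a triplet from arr summing to target, or (0, 0, 0) if none exists.

    >>> triplet_sum2([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    """
    arr.sort()  # O(n log n) once, then two pointers scan each suffix in O(n)
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1  # need a larger sum
            else:
                right -= 1  # need a smaller sum
    return (0, 0, 0)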
| 336 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
    print("""\nThe shortest path matrix using the Floyd-Warshall algorithm\n""" )
for i in range(__UpperCAmelCase ):
for j in range(__UpperCAmelCase ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[str] = [[float("""inf""" ) for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
for i in range(__UpperCAmelCase ):
for j in range(__UpperCAmelCase ):
_lowercase : int = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__UpperCAmelCase ):
# looping through rows of graph array
for i in range(__UpperCAmelCase ):
# looping through columns of graph array
for j in range(__UpperCAmelCase ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_lowercase : Optional[int] = dist[i][k] + dist[k][j]
_print_dist(__UpperCAmelCase , __UpperCAmelCase )
return dist, v
if __name__ == "__main__":
UpperCAmelCase: Optional[Any] = int(input("""Enter number of vertices: """))
UpperCAmelCase: Any = int(input("""Enter number of edges: """))
UpperCAmelCase: Dict = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
UpperCAmelCase: Union[str, Any] = 0.0
        # src and dst are indices that must be within the array size graph[v][v];
        # failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
UpperCAmelCase: List[str] = int(input("""Enter source:"""))
UpperCAmelCase: Optional[int] = int(input("""Enter destination:"""))
UpperCAmelCase: int = float(input("""Enter weight:"""))
UpperCAmelCase: List[Any] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
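# A compact de-obfuscated sketch of the relaxation above (named _sketch to avoid
# clashing with the floyd_warshall call earlier in the row). float("inf")
# arithmetic makes the explicit INF guards optional, since inf + w == inf can
# never undercut a finite distance.
INF = float("inf")


def floyd_warshall_sketch(graph: list[list[float]], v: int) -> list[list[float]]:
    dist = [row[:] for row in graph]  # copy so the input is not mutated
    for k in range(v):  # allow vertex k as an intermediate stop
        for i in range(v):
            for j in range(v):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist


# replay of the 3-vertex example from the comments above: no indirect route
# improves on the direct edges, so the matrix is unchanged
assert floyd_warshall_sketch(
    [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]], 3
) == [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]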
| 336 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
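# A hedged usage sketch for the processor above. The public class name
# (InstructBlipProcessor) and the checkpoint id are assumptions not stated in
# this row; the qformer_* output keys follow from the pop()/update() calls in
# __call__:
#
#     from PIL import Image
#     from transformers import InstructBlipProcessor
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#     inputs = processor(
#         images=Image.open("example.jpg"),
#         text="What is unusual about this image?",
#         return_tensors="pt",
#     )
#     sorted(inputs.keys())
#     # ['attention_mask', 'input_ids', 'pixel_values',
#     #  'qformer_attention_mask', 'qformer_input_ids']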
| 336 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase: Any = datasets.utils.logging.get_logger(__name__)
class UpperCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = None
SCREAMING_SNAKE_CASE_ : bool = None
class UpperCamelCase ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = datasets.Audio()
SCREAMING_SNAKE_CASE_ : Dict = "audio"
SCREAMING_SNAKE_CASE_ : List[Any] = AudioFolderConfig
SCREAMING_SNAKE_CASE_ : List[str] # definition at the bottom of the script
SCREAMING_SNAKE_CASE_ : List[str] = AudioClassification(audio_column="audio" , label_column="label" )
UpperCAmelCase: int = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
UpperCAmelCase: Dict = AUDIO_EXTENSIONS
| 336 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=7 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=99 ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_="None" ,UpperCAmelCase_=3 ,UpperCAmelCase_=4 ,UpperCAmelCase_=None ,):
_lowercase : str = parent
_lowercase : Any = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : List[str] = is_training
_lowercase : int = use_input_mask
_lowercase : str = use_token_type_ids
_lowercase : Optional[Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : int = hidden_act
_lowercase : Any = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Dict = type_vocab_size
_lowercase : Optional[int] = type_sequence_label_size
_lowercase : Tuple = initializer_range
_lowercase : Union[str, Any] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : Optional[int] = relative_attention
_lowercase : Union[str, Any] = position_biased_input
_lowercase : Any = pos_att_type
_lowercase : Union[str, Any] = scope
def lowerCamelCase__ ( self ):
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Dict = None
if self.use_token_type_ids:
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : List[str] = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowercase : str = DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,initializer_range=self.initializer_range ,return_dict=UpperCAmelCase_ ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = TFDebertaVaModel(config=UpperCAmelCase_ )
_lowercase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowercase : int = [input_ids, input_mask]
_lowercase : Optional[Any] = model(UpperCAmelCase_ )
_lowercase : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : int = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowercase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.num_labels
_lowercase : List[Any] = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowercase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = self.num_labels
_lowercase : Tuple = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowercase : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : str = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_lowercase : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowercase : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : str = False
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = TFDebertaVaModelTester(self )
_lowercase : str = ConfigTester(self ,config_class=UpperCAmelCase_ ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase__ ( self ):
_lowercase : List[str] = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def lowerCamelCase__ ( self ):
pass
@slow
def lowerCamelCase__ ( self ):
_lowercase : Tuple = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_lowercase : Tuple = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_lowercase : str = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ )[0]
_lowercase : Any = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] ,UpperCAmelCase_ ,atol=1E-4 )
| 336 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
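# A hedged composition sketch for the three configs above; the sub-config names
# (InstructBlipVisionConfig, InstructBlipQFormerConfig) appear in the __init__
# body, while InstructBlipConfig and the classmethod name
# from_vision_qformer_text_configs are assumptions inferred from the
# vision/qformer/text keyword layout:
#
#     text_config = CONFIG_MAPPING["opt"]()  # the default text backbone per __init__
#     config = InstructBlipConfig.from_vision_qformer_text_configs(
#         InstructBlipVisionConfig(), InstructBlipQFormerConfig(), text_config
#     )
#     config.num_query_tokens  # 32 by default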
| 336 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "speech_to_text_2"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,UpperCAmelCase_=1_00_00 ,UpperCAmelCase_=6 ,UpperCAmelCase_=20_48 ,UpperCAmelCase_=4 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=True ,UpperCAmelCase_="relu" ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=2 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,UpperCAmelCase_=10_24 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : Dict = d_model
_lowercase : int = decoder_ffn_dim
_lowercase : Any = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : List[str] = dropout
_lowercase : str = attention_dropout
_lowercase : Optional[int] = activation_dropout
_lowercase : List[str] = activation_function
_lowercase : Any = init_std
_lowercase : Tuple = decoder_layerdrop
_lowercase : Optional[int] = use_cache
_lowercase : str = decoder_layers
_lowercase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : List[str] = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,decoder_start_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
| 336 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
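        # NOTE: k is hardcoded to 0.04 below, shadowing the validated self.k from __init__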
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
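# The per-pixel score in the nested loops above is the Harris response
# R = det(M) - k * trace(M)**2, where M = [[wxx, wxy], [wxy, wyy]] is the
# structure tensor summed over the window. A quick self-contained check of the
# formula with made-up window sums:
import numpy as np

wxx, wyy, wxy = 4.0, 3.0, 1.0  # window sums of Ix**2, Iy**2, Ix*Iy (made up)
m = np.array([[wxx, wxy], [wxy, wyy]])
r = float(np.linalg.det(m) - 0.04 * np.trace(m) ** 2)
assert np.isclose(r, 9.04)  # (4*3 - 1**2) - 0.04 * (4 + 3)**2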
| 336 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = LayoutLMTokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LayoutLMTokenizerFast
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : str = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Dict = """UNwant\u00E9d,running"""
_lowercase : Any = """unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Union[str, Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCAmelCase_ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[7, 4, 5, 10, 8, 9] )
def lowerCamelCase__ ( self ):
pass
| 336 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=None ,):
_lowercase : Optional[int] = parent
_lowercase : str = batch_size
_lowercase : str = image_size
_lowercase : str = patch_size
_lowercase : str = num_channels
_lowercase : Tuple = is_training
_lowercase : Dict = use_labels
_lowercase : Union[str, Any] = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Dict = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = type_sequence_label_size
_lowercase : Tuple = initializer_range
_lowercase : Dict = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : int = (image_size // patch_size) ** 2
_lowercase : Dict = num_patches + 1
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Tuple = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = TFViTModel(config=UpperCAmelCase_ )
_lowercase : str = model(UpperCAmelCase_ ,training=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowercase : Any = self.image_size // 2
_lowercase : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
_lowercase : Tuple = model(UpperCAmelCase_ ,interpolate_pos_encoding=UpperCAmelCase_ ,training=UpperCAmelCase_ )
_lowercase : List[Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Any = self.type_sequence_label_size
_lowercase : int = TFViTForImageClassification(UpperCAmelCase_ )
_lowercase : List[str] = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ ,training=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowercase : Union[str, Any] = self.image_size // 2
_lowercase : Dict = pixel_values[:, :, :image_size, :image_size]
_lowercase : Optional[Any] = model(UpperCAmelCase_ ,interpolate_pos_encoding=UpperCAmelCase_ ,training=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase : Union[str, Any] = 1
_lowercase : Dict = TFViTForImageClassification(UpperCAmelCase_ )
_lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self ):
_lowercase : int = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : str = config_and_inputs
_lowercase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = TFViTModelTester(self )
_lowercase : Any = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
_lowercase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,tf.keras.layers.Layer ) )
def lowerCamelCase__ ( self ):
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(UpperCAmelCase_ )
_lowercase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Optional[int] = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Tuple = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCAmelCase_ )
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self ):
_lowercase : int = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
_lowercase : Optional[int] = self.default_image_processor
_lowercase : str = prepare_img()
_lowercase : Dict = image_processor(images=UpperCAmelCase_ ,return_tensors="""tf""" )
# forward pass
_lowercase : str = model(**UpperCAmelCase_ )
# verify the logits
_lowercase : int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ )
_lowercase : Optional[Any] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 )
| 336 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
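# The --correct_filename input holds one semicolon-separated record per line,
# matching the four-way split above; a hypothetical example record:
#
#     tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(out.shape, (1, 8))
#
# With --fail_filename given, only tests listed there (as file::class::test) are
# rewritten, and repeated records for the same test advance to the next matching
# occurrence via the defaultdict counter.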
| 336 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
# Load configuration defined in the metadata file
with open(__UpperCAmelCase ) as metadata_file:
_lowercase : Dict = json.load(__UpperCAmelCase )
_lowercase : List[Any] = LukeConfig(use_entity_aware_attention=__UpperCAmelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowercase : Optional[int] = torch.load(__UpperCAmelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
_lowercase : Union[str, Any] = load_original_entity_vocab(__UpperCAmelCase )
# add an entry for [MASK2]
_lowercase : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowercase : int = AddedToken("""<ent>""" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
_lowercase : Optional[Any] = AddedToken("""<ent2>""" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , """tokenizer_config.json""" ) , """r""" ) as f:
_lowercase : int = json.load(__UpperCAmelCase )
_lowercase : int = """MLukeTokenizer"""
with open(os.path.join(__UpperCAmelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowercase : Dict = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
# Initialize the embeddings of the special tokens
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
_lowercase : Union[str, Any] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
_lowercase : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
_lowercase : List[Any] = word_emb[ent_init_index].unsqueeze(0 )
_lowercase : int = word_emb[enta_init_index].unsqueeze(0 )
_lowercase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowercase : Optional[int] = state_dict[bias_name]
_lowercase : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
_lowercase : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
_lowercase : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowercase : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_lowercase : Optional[Any] = state_dict[prefix + matrix_name]
_lowercase : Any = state_dict[prefix + matrix_name]
_lowercase : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowercase : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowercase : int = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowercase : Optional[int] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowercase : Union[str, Any] = state_dict["""entity_predictions.bias"""]
_lowercase : Optional[int] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowercase : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowercase : Any = LukeForMaskedLM(config=__UpperCAmelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
_lowercase : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
_lowercase : Optional[int] = state_dict[key]
else:
_lowercase : Dict = state_dict[key]
_lowercase , _lowercase : Any = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if set(__UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__UpperCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowercase : Optional[Any] = MLukeTokenizer.from_pretrained(__UpperCAmelCase , task="""entity_classification""" )
_lowercase : List[Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
_lowercase : List[str] = (0, 9)
_lowercase : Dict = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowercase : List[Any] = model(**__UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowercase : List[str] = torch.Size((1, 33, 768) )
_lowercase : int = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowercase : str = torch.Size((1, 1, 768) )
_lowercase : List[Any] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowercase : Any = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
_lowercase : Dict = """Tokyo is the capital of <mask>."""
_lowercase : Union[str, Any] = (24, 30)
_lowercase : Dict = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowercase : Union[str, Any] = model(**__UpperCAmelCase )
_lowercase : Dict = encoding["""input_ids"""][0].tolist()
_lowercase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
_lowercase : List[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__UpperCAmelCase )
_lowercase : Tuple = outputs.entity_logits[0][0].argmax().item()
_lowercase : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__UpperCAmelCase ) )
model.save_pretrained(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = ["""[MASK]""", """[PAD]""", """[UNK]"""]
_lowercase : Union[str, Any] = [json.loads(__UpperCAmelCase ) for line in open(__UpperCAmelCase )]
_lowercase : Dict = {}
for entry in data:
_lowercase : Optional[Any] = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowercase : Tuple = entity_id
break
_lowercase : Optional[int] = F"""{language}:{entity_name}"""
_lowercase : int = entity_id
return new_mapping
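# A minimal sketch of the expected input/output for the loader above (the exact
# file contents are an assumption inferred from the parsing logic): each line of
# the original entity vocab is a JSON object such as
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# Special tokens ([MASK], [PAD], [UNK]) are keyed as-is, while regular entities
# are keyed as "{language}:{entity_name}", so the entry above yields
#   {"en:Japan": 3, "ja:日本": 3}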
if __name__ == "__main__":
UpperCAmelCase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
UpperCAmelCase: Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 336 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 336 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 1 |
"""simple docstring"""
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : Optional[int] = set_counts
_lowercase : str = max(UpperCAmelCase_ )
_lowercase : str = len(UpperCAmelCase_ )
_lowercase : Any = [1] * num_sets
_lowercase : str = list(range(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Any = self.get_parent(UpperCAmelCase_ )
_lowercase : str = self.get_parent(UpperCAmelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_lowercase : Tuple = 0
_lowercase : List[str] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_lowercase : List[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_lowercase : List[Any] = 0
_lowercase : List[str] = src_parent
_lowercase : Union[str, Any] = self.set_counts[src_parent]
_lowercase : Optional[int] = max(self.max_set ,UpperCAmelCase_ )
return True
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.parents[disj_set] == disj_set:
return disj_set
_lowercase : Dict = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
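# A short usage sketch of the disjoint-set class above (the constructor call is
# illustrative; `merge` and `get_parent` name the intended methods):
#   forest = UpperCamelCase([1, 1, 1])   # three singleton sets
#   forest.merge(0, 1)                   # union-by-rank joins them -> size 2
#   forest.merge(1, 2)                   # -> size 3
#   forest.get_parent(0) == forest.get_parent(2)  # True: same representative
#   forest.max_set                       # 3, the largest set seen so far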
| 336 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
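# Worked example for get_resize_output_image_size above (numbers are
# illustrative): for a 480x640 (height x width) image with output_size=384,
# keep_aspect_ratio=True and multiple=32, scale_height = 384/480 = 0.8 and
# scale_width = 384/640 = 0.6. Since |1 - 0.6| >= |1 - 0.8|, the height scale
# wins, both dimensions are scaled by 0.8, and rounding to the nearest multiple
# of 32 yields (new_height, new_width) = (384, 512).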
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336 | 1 |
"""simple docstring"""
import os
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "input.txt" ):
with open(os.path.join(os.path.dirname(__UpperCAmelCase ) , __UpperCAmelCase ) ) as input_file:
_lowercase : Dict = [
[int(__UpperCAmelCase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
_lowercase : Optional[int] = len(__UpperCAmelCase )
_lowercase : Tuple = len(matrix[0] )
_lowercase : Any = [[-1 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
for i in range(__UpperCAmelCase ):
_lowercase : Optional[int] = matrix[i][0]
for j in range(1 , __UpperCAmelCase ):
for i in range(__UpperCAmelCase ):
_lowercase : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __UpperCAmelCase ):
_lowercase : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowercase : List[str] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
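# A tiny worked example of the dynamic programme above (matrix is illustrative):
# for [[1, 9], [9, 1]], column 0 initialises to [1, 9]; the left-to-right pass
# gives provisional sums [10, 10] for column 1; the downward and upward
# relaxation passes (which allow vertical moves within a column) cannot improve
# either entry, so the minimal left-to-right path sum is 10.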
if __name__ == "__main__":
print(F'{solution() = }')
| 336 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
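# A minimal sketch of the two compositions named above (not part of the original
# script; the relation matrices r and s below are illustrative). For fuzzy
# relations r (n x k) and s (k x m):
#   max-min:     t[i][j] = max over k of min(r[i][k], s[k][j])
#   max-product: t[i][j] = max over k of r[i][k] * s[k][j]
r = np.array([[0.2, 0.7], [0.9, 0.4]])
s = np.array([[0.5, 0.1], [0.6, 0.8]])
max_min_composition = np.minimum(r[:, :, None], s[None, :, :]).max(axis=1)
max_product_composition = (r[:, :, None] * s[None, :, :]).max(axis=1)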
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ):
# sanity-check the constraint implementation at construction time
self.test()
def lowerCamelCase__ ( self ):
_lowercase : Any = 0
_lowercase : Tuple = False
while not completed:
if counter == 1:
self.reset()
_lowercase : Optional[Any] = self.advance()
if not self.does_advance(UpperCAmelCase_ ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
_lowercase , _lowercase , _lowercase : Optional[Any] = self.update(UpperCAmelCase_ )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def lowerCamelCase__ ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def lowerCamelCase__ ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def lowerCamelCase__ ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def lowerCamelCase__ ( self ,UpperCAmelCase_=False ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
super(UpperCAmelCase_ ,self ).__init__()
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) or len(UpperCAmelCase_ ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
_lowercase : int = token_ids
_lowercase : str = len(self.token_ids )
_lowercase : List[str] = -1 # the index of the currently fulfilled step
_lowercase : Optional[Any] = False
def lowerCamelCase__ ( self ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase_ )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase_ )}""" )
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[Any] = False
if self.does_advance(UpperCAmelCase_ ):
self.fulfilled_idx += 1
_lowercase : Any = True
if self.fulfilled_idx == (self.seqlen - 1):
_lowercase : int = True
_lowercase : Union[str, Any] = completed
else:
# failed to make progress.
_lowercase : Optional[Any] = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = False
_lowercase : List[str] = 0
def lowerCamelCase__ ( self ):
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__ ( self ,UpperCAmelCase_=False ):
_lowercase : Any = PhrasalConstraint(self.token_ids )
if stateful:
_lowercase : Dict = self.seqlen
_lowercase : Tuple = self.fulfilled_idx
_lowercase : Tuple = self.completed
return new_constraint
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=True ):
_lowercase : Optional[int] = max([len(UpperCAmelCase_ ) for one in nested_token_ids] )
_lowercase : List[Any] = {}
for token_ids in nested_token_ids:
_lowercase : Union[str, Any] = root
for tidx, token_id in enumerate(UpperCAmelCase_ ):
if token_id not in level:
_lowercase : int = {}
_lowercase : str = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f""" {nested_token_ids}.""" )
_lowercase : int = root
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Any = self.trie
for current_token in current_seq:
_lowercase : Optional[Any] = start[current_token]
_lowercase : Any = list(start.keys() )
return next_tokens
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : str = self.next_tokens(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) == 0
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[Any] = list(root.values() )
if len(UpperCAmelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase_ ) for nn in next_nodes] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = self.count_leaves(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) != leaf_count
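# A short usage sketch of the trie above (method names follow the intended API
# — next_tokens / reached_leaf — rather than the obfuscated definitions):
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
#   trie.next_tokens([1, 2])      # -> [3, 4]: both alternatives are still open
#   trie.reached_leaf([1, 2, 3])  # -> True: one complete alternative consumed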
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
super(UpperCAmelCase_ ,self ).__init__()
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) or len(UpperCAmelCase_ ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_lowercase : str = DisjunctiveTrie(UpperCAmelCase_ )
_lowercase : Optional[Any] = nested_token_ids
_lowercase : Optional[Any] = self.trie.max_height
_lowercase : Optional[int] = []
_lowercase : int = False
def lowerCamelCase__ ( self ):
_lowercase : str = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase_ )}""" )
_lowercase : Dict = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase_ )}""" )
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : List[str] = False
if self.does_advance(UpperCAmelCase_ ):
self.current_seq.append(UpperCAmelCase_ )
_lowercase : Union[str, Any] = True
else:
_lowercase : Optional[int] = True
self.reset()
_lowercase : List[str] = self.trie.reached_leaf(self.current_seq )
_lowercase : Optional[int] = completed
return stepped, completed, reset
def lowerCamelCase__ ( self ):
_lowercase : Tuple = False
_lowercase : Union[str, Any] = []
def lowerCamelCase__ ( self ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__ ( self ,UpperCAmelCase_=False ):
_lowercase : Tuple = DisjunctiveConstraint(self.token_ids )
if stateful:
_lowercase : Optional[Any] = self.seqlen
_lowercase : Any = self.current_seq
_lowercase : Dict = self.completed
return new_constraint
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : Dict = constraints
# max # of steps required to fulfill a given constraint
_lowercase : str = max([c.seqlen for c in constraints] )
_lowercase : str = len(UpperCAmelCase_ )
_lowercase : Any = False
self.init_state()
def lowerCamelCase__ ( self ):
_lowercase : List[str] = []
_lowercase : Dict = None
_lowercase : List[str] = [constraint.copy(stateful=UpperCAmelCase_ ) for constraint in self.constraints]
def lowerCamelCase__ ( self ):
_lowercase : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__ ( self ):
_lowercase : Any = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_lowercase : Any = constraint.advance()
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
token_list.append(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
token_list.extend(UpperCAmelCase_ )
else:
_lowercase : List[Any] = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
token_list.append(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
token_list.extend(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_lowercase , _lowercase : int = self.add(UpperCAmelCase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
_lowercase , _lowercase : Tuple = False, False
if self.completed:
_lowercase : Union[str, Any] = True
_lowercase : List[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
_lowercase , _lowercase , _lowercase : List[Any] = self.inprogress_constraint.update(UpperCAmelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase_ ) )
_lowercase : List[Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_lowercase : List[Any] = None
if len(self.pending_constraints ) == 0:
# we're done!
_lowercase : int = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase_ ):
_lowercase , _lowercase , _lowercase : Dict = pending_constraint.update(UpperCAmelCase_ )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(UpperCAmelCase_ )
_lowercase : Tuple = None
if not complete and stepped:
_lowercase : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_lowercase : Optional[Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_lowercase : Any = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__ ( self ,UpperCAmelCase_=True ):
_lowercase : List[Any] = ConstraintListState(self.constraints ) # we never actually modify the self.constraints
# objects throughout this process, so the copy starts from their initialization state.
if stateful:
_lowercase : int = [
constraint.copy(stateful=UpperCAmelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_lowercase : Optional[int] = self.inprogress_constraint.copy(stateful=UpperCAmelCase_ )
_lowercase : Dict = [constraint.copy() for constraint in self.pending_constraints]
return new_state
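# A short usage sketch of the constraint machinery above (token ids are
# illustrative and names follow the intended API):
#   state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
#   state.add(5)     # -> (False, True): stepped, not yet complete
#   state.add(6)     # -> (False, True)
#   state.add(7)     # -> (True, True): the phrase [5, 6, 7] is fulfilled
#   state.advance()  # -> None once every constraint is complete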
| 336 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 336 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = CpmAntTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Optional[int] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowercase : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def lowerCamelCase__ ( self ):
_lowercase : str = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowercase : List[str] = """今天天气真好!"""
_lowercase : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowercase : List[str] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : Tuple = """今天天气真好!"""
_lowercase : Any = [tokenizer.bos_token] + tokens
_lowercase : Union[str, Any] = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
| 336 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
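# Example keys yielded by the generator above (illustrative): rows of partition 3
# are emitted as ("3_0", row0.asDict()), ("3_1", row1.asDict()), ... so example
# ids remain unique across partitions regardless of partition_order.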
class UpperCamelCase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class UpperCamelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
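# A minimal sketch of how this builder is reached in practice (the entry point is
# an assumption based on the public `datasets` API, not defined in this file):
#   from datasets import Dataset
#   df = spark.range(100).withColumnRenamed("id", "value")
#   ds = Dataset.from_spark(df)  # dispatches to the Spark builder above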
| 336 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
UpperCAmelCase: List[str] = (720, 1_280) # Height, Width
UpperCAmelCase: List[Any] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
UpperCAmelCase: List[Any] = 1 / 100
UpperCAmelCase: List[Any] = """"""
UpperCAmelCase: Union[str, Any] = """"""
UpperCAmelCase: Optional[int] = """"""
UpperCAmelCase: int = 250
def __SCREAMING_SNAKE_CASE ( ):
_lowercase , _lowercase : Optional[int] = get_dataset(__UpperCAmelCase , __UpperCAmelCase )
for index in range(__UpperCAmelCase ):
_lowercase : Tuple = random.sample(range(len(__UpperCAmelCase ) ) , 4 )
_lowercase , _lowercase , _lowercase : Tuple = update_image_and_anno(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , filter_scale=__UpperCAmelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowercase : int = random_chars(32 )
_lowercase : Optional[Any] = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowercase : Any = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , __UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowercase : List[str] = []
for anno in new_annos:
_lowercase : Optional[Any] = anno[3] - anno[1]
_lowercase : Optional[Any] = anno[4] - anno[2]
_lowercase : List[Any] = anno[1] + width / 2
_lowercase : int = anno[2] + height / 2
_lowercase : List[Any] = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(__UpperCAmelCase )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Union[str, Any] = []
_lowercase : List[str] = []
for label_file in glob.glob(os.path.join(__UpperCAmelCase , """*.txt""" ) ):
_lowercase : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__UpperCAmelCase ) as in_file:
_lowercase : Any = in_file.readlines()
_lowercase : List[Any] = os.path.join(__UpperCAmelCase , F"""{label_name}.jpg""" )
_lowercase : Any = []
for obj_list in obj_lists:
_lowercase : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ )
_lowercase : Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2
_lowercase : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
_lowercase : List[Any] = float(obj[1] ) + float(obj[3] ) / 2
_lowercase : Dict = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__UpperCAmelCase )
labels.append(__UpperCAmelCase )
return img_paths, labels
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 , ):
_lowercase : Optional[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
_lowercase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowercase : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowercase : Dict = int(scale_x * output_size[1] )
_lowercase : Optional[Any] = int(scale_y * output_size[0] )
_lowercase : Dict = []
_lowercase : List[str] = []
for i, index in enumerate(__UpperCAmelCase ):
_lowercase : Dict = all_img_list[index]
path_list.append(__UpperCAmelCase )
_lowercase : Optional[Any] = all_annos[index]
_lowercase : Optional[Any] = cva.imread(__UpperCAmelCase )
if i == 0: # top-left
_lowercase : Any = cva.resize(__UpperCAmelCase , (divid_point_x, divid_point_y) )
_lowercase : List[Any] = img
for bbox in img_annos:
_lowercase : Optional[Any] = bbox[1] * scale_x
_lowercase : int = bbox[2] * scale_y
_lowercase : List[str] = bbox[3] * scale_x
_lowercase : Optional[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowercase : str = cva.resize(__UpperCAmelCase , (output_size[1] - divid_point_x, divid_point_y) )
_lowercase : Union[str, Any] = img
for bbox in img_annos:
_lowercase : Any = scale_x + bbox[1] * (1 - scale_x)
_lowercase : List[Any] = bbox[2] * scale_y
_lowercase : List[str] = scale_x + bbox[3] * (1 - scale_x)
_lowercase : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowercase : Optional[Any] = cva.resize(__UpperCAmelCase , (divid_point_x, output_size[0] - divid_point_y) )
_lowercase : str = img
for bbox in img_annos:
_lowercase : str = bbox[1] * scale_x
_lowercase : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
_lowercase : Dict = bbox[3] * scale_x
_lowercase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowercase : List[str] = cva.resize(
__UpperCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowercase : Union[str, Any] = img
for bbox in img_annos:
_lowercase : Dict = scale_x + bbox[1] * (1 - scale_x)
_lowercase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_lowercase : int = scale_x + bbox[3] * (1 - scale_x)
_lowercase : Union[str, Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowercase : List[str] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    assert number_char > 1, "The number of characters should be greater than 1"
_lowercase : Dict = ascii_lowercase + digits
return "".join(random.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 336 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 1 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase: Dict = HfApi()
UpperCAmelCase: Optional[Any] = {}
# fmt: off
UpperCAmelCase: Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
UpperCAmelCase: int = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
UpperCAmelCase: Dict = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
UpperCAmelCase: str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
UpperCAmelCase: Dict = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
UpperCAmelCase: List[str] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
UpperCAmelCase: Dict = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
UpperCAmelCase: Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
UpperCAmelCase: Optional[int] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
UpperCAmelCase: List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
UpperCAmelCase: Optional[int] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
UpperCAmelCase: str = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
UpperCAmelCase: Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
UpperCAmelCase: Any = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
UpperCAmelCase: Tuple = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
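# For each matching diffusers checkpoint mirrored locally, run a single
# denoising forward pass with a fixed seed and compare the first 30 output
# logits against the reference slices above.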
UpperCAmelCase: str = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase: Union[str, Any] = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith("""CompVis"""):
UpperCAmelCase: int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
UpperCAmelCase: Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase: int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase: Union[str, Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase: Tuple = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
)
print(F'{mod.modelId} has passed successfully!!!')
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
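    """Return the words of the input string in reverse order.
    >>> __SCREAMING_SNAKE_CASE("Hello World")
    'World Hello'
    >>> __SCREAMING_SNAKE_CASE(" a  b   c ")
    'c b a'
    """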
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 336 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
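# Hash a module's source with comments stripped, so dataset caches are only
# invalidated when the packaged builder code meaningfully changes.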
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = []
for line in lines:
_lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments
if line:
filtered_lines.append(__UpperCAmelCase )
_lowercase : Tuple = """\n""".join(__UpperCAmelCase )
# Make a hash from all this code
_lowercase : Tuple = full_str.encode("""utf-8""" )
return shaaaa(__UpperCAmelCase ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 1 |
"""simple docstring"""
import os
from math import logaa
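# Project Euler 99: each line of base_exp.txt holds "base,exponent"; the
# numbers are compared on a log scale (exponent * log10(base)) and the
# 1-indexed line with the largest value is returned.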
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "base_exp.txt" ):
_lowercase : float = 0
_lowercase : str = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__UpperCAmelCase ) , __UpperCAmelCase ) ) ):
_lowercase , _lowercase : List[str] = list(map(__UpperCAmelCase , line.split(""",""" ) ) )
if x * logaa(__UpperCAmelCase ) > largest:
_lowercase : Optional[Any] = x * logaa(__UpperCAmelCase )
_lowercase : int = i + 1
return result
if __name__ == "__main__":
print(solution())
| 336 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
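# Build a discrete beta schedule whose cumulative product of (1 - beta)
# follows the chosen alpha_bar curve ("cosine" or "exp") over
# num_diffusion_timesteps steps.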
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
        # Currently only gamma=0 is supported; this usually works best anyway.
        # Supporting gamma in the future would require scaling the timestep
        # before passing it to the model, which means an API change.
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
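# Binary-search a non-increasing row for the index of its first negative
# value; every element from that index onward is negative.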
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase: int = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
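# CodeGen keeps GPT-style hyperparameter names (n_embd, n_layer, n_head, ...);
# the attribute_map below aliases the HF-standard keys onto them.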
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "codegen"
SCREAMING_SNAKE_CASE_ : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=5_04_00 ,UpperCAmelCase_=20_48 ,UpperCAmelCase_=20_48 ,UpperCAmelCase_=40_96 ,UpperCAmelCase_=28 ,UpperCAmelCase_=16 ,UpperCAmelCase_=64 ,UpperCAmelCase_=None ,UpperCAmelCase_="gelu_new" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=True ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=False ,**UpperCAmelCase_ ,):
_lowercase : List[str] = vocab_size
_lowercase : Tuple = n_ctx
_lowercase : List[Any] = n_positions
_lowercase : Any = n_embd
_lowercase : Tuple = n_layer
_lowercase : List[Any] = n_head
_lowercase : List[Any] = n_inner
_lowercase : Tuple = rotary_dim
_lowercase : List[str] = activation_function
_lowercase : Any = resid_pdrop
_lowercase : str = embd_pdrop
_lowercase : Any = attn_pdrop
_lowercase : List[str] = layer_norm_epsilon
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = use_cache
_lowercase : List[str] = bos_token_id
_lowercase : List[str] = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,tie_word_embeddings=UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "default" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,):
super().__init__(UpperCAmelCase_ ,task=UpperCAmelCase_ ,patching_specs=UpperCAmelCase_ ,use_past=UpperCAmelCase_ )
if not getattr(self._config ,"""pad_token_id""" ,UpperCAmelCase_ ):
# TODO: how to do that better?
_lowercase : List[Any] = 0
@property
def lowerCamelCase__ ( self ):
_lowercase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ ,direction="""inputs""" )
_lowercase : Dict = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowercase : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
return self._config.n_head
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,):
_lowercase : Optional[Any] = super(UpperCAmelCase_ ,self ).generate_dummy_inputs(
UpperCAmelCase_ ,batch_size=UpperCAmelCase_ ,seq_length=UpperCAmelCase_ ,is_pair=UpperCAmelCase_ ,framework=UpperCAmelCase_ )
# We need to order the input in the way they appears in the forward()
_lowercase : Tuple = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowercase , _lowercase : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowercase : Dict = seqlen + 2
_lowercase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowercase : List[str] = [
(torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(self.num_layers )
]
_lowercase : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
_lowercase : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
_lowercase : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )] ,dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
return 13
| 336 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 1 |
"""simple docstring"""
UpperCAmelCase: Optional[Any] = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
UpperCAmelCase: Optional[Any] = {value: key for key, value in encode_dict.items()}
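# Baconian cipher: every letter maps to a 5-character code over the two-symbol
# alphabet {A, B}; spaces are passed through so word boundaries survive.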
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Union[str, Any] = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if set(__UpperCAmelCase ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_lowercase : Optional[Any] = """"""
for word in coded.split():
while len(__UpperCAmelCase ) != 0:
decoded += decode_dict[word[:5]]
_lowercase : Optional[int] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 336 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
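    # Map a continuous sigma back onto a (fractional) timestep by linearly
    # interpolating between the two nearest entries of the log-sigma table.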
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
        # Currently only gamma=0 is supported; this usually works best anyway.
        # Supporting gamma in the future would require scaling the timestep
        # before passing it to the model, which means an API change.
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
UpperCAmelCase: Union[str, Any] = """2020.9.26"""
UpperCAmelCase: Any = """xcodz-dot, cclaus, dhruvmanila"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if not all(isinstance(__UpperCAmelCase , (float, int) ) for val in locals().values() ):
_lowercase : Union[str, Any] = F"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(__UpperCAmelCase )
_lowercase : Dict = ((x * distance) / (z + distance)) * scale
_lowercase : Optional[int] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""Axis must be a str""" )
_lowercase : Dict = locals()
del input_variables["axis"]
if not all(isinstance(__UpperCAmelCase , (float, int) ) for val in input_variables.values() ):
_lowercase : Union[str, Any] = (
"""Input values except axis must either be float or int: """
F"""{list(input_variables.values() )}"""
)
raise TypeError(__UpperCAmelCase )
_lowercase : List[str] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
_lowercase : Optional[int] = x * math.cos(__UpperCAmelCase ) - y * math.sin(__UpperCAmelCase )
_lowercase : Any = y * math.cos(__UpperCAmelCase ) + x * math.sin(__UpperCAmelCase )
_lowercase : Tuple = z
elif axis == "x":
_lowercase : Tuple = y * math.cos(__UpperCAmelCase ) - z * math.sin(__UpperCAmelCase )
_lowercase : Any = z * math.cos(__UpperCAmelCase ) + y * math.sin(__UpperCAmelCase )
_lowercase : List[Any] = x
elif axis == "y":
_lowercase : Dict = x * math.cos(__UpperCAmelCase ) - z * math.sin(__UpperCAmelCase )
_lowercase : Tuple = z * math.cos(__UpperCAmelCase ) + x * math.sin(__UpperCAmelCase )
_lowercase : str = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
| 336 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 336 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "trocr"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : str = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self ,UpperCAmelCase_=5_02_65 ,UpperCAmelCase_=10_24 ,UpperCAmelCase_=12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=40_96 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,**UpperCAmelCase_ ,):
_lowercase : List[Any] = vocab_size
_lowercase : Optional[Any] = d_model
_lowercase : List[Any] = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Dict = decoder_ffn_dim
_lowercase : Any = activation_function
_lowercase : Tuple = max_position_embeddings
_lowercase : int = dropout
_lowercase : Union[str, Any] = attention_dropout
_lowercase : Optional[Any] = activation_dropout
_lowercase : List[Any] = init_std
_lowercase : List[Any] = decoder_layerdrop
_lowercase : str = use_cache
_lowercase : Tuple = scale_embedding
_lowercase : Optional[int] = use_learned_position_embeddings
_lowercase : str = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,decoder_start_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
| 336 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
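    # rebuild the sorted rotation table: prepend the BWT column to every row and re-sort, once per character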
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: str = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "imagegpt"
SCREAMING_SNAKE_CASE_ : Any = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Any = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=5_12 + 1 ,UpperCAmelCase_=32 * 32 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=24 ,UpperCAmelCase_=8 ,UpperCAmelCase_=None ,UpperCAmelCase_="quick_gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = vocab_size
_lowercase : int = n_positions
_lowercase : Optional[int] = n_embd
_lowercase : Tuple = n_layer
_lowercase : List[str] = n_head
_lowercase : Dict = n_inner
_lowercase : Union[str, Any] = activation_function
_lowercase : Any = resid_pdrop
_lowercase : Tuple = embd_pdrop
_lowercase : Dict = attn_pdrop
_lowercase : int = layer_norm_epsilon
_lowercase : Dict = initializer_range
_lowercase : Dict = scale_attn_weights
_lowercase : Any = use_cache
_lowercase : Dict = scale_attn_by_inverse_layer_idx
_lowercase : str = reorder_and_upcast_attn
_lowercase : Optional[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 3 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 32 ,):
_lowercase : List[Any] = self._generate_dummy_images(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[Any] = dict(preprocessor(images=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) )
return inputs
| 336 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
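    # fix arr[i], then sweep two pointers inward over the remainder of the sorted array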
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
        # num is negative and the element just before it is non-negative, so mid is the first negative index
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
    # No negative numbers, so return the last index of the array + 1, which is the length.
return len(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
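    # the matrix is sorted down each column as well, so the first-negative bound never grows from row to row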
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
| 336 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not sentence:
return ""
_lowercase : Any = dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 336 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ShapEPipeline
SCREAMING_SNAKE_CASE_ : List[str] = ["prompt"]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["prompt"]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE_ : Dict = False
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
return 8
@property
def lowerCamelCase__ ( self ):
_lowercase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
return CLIPTextModelWithProjection(UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Optional[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowercase : Any = PriorTransformer(**UpperCAmelCase_ )
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Dict = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowercase : Tuple = ShapERenderer(**UpperCAmelCase_ )
return model
def lowerCamelCase__ ( self ):
_lowercase : str = self.dummy_prior
_lowercase : Optional[int] = self.dummy_text_encoder
_lowercase : Union[str, Any] = self.dummy_tokenizer
_lowercase : str = self.dummy_renderer
_lowercase : int = HeunDiscreteScheduler(
beta_schedule="""exp""" ,num_train_timesteps=10_24 ,prediction_type="""sample""" ,use_karras_sigmas=UpperCAmelCase_ ,clip_sample=UpperCAmelCase_ ,clip_sample_range=1.0 ,)
_lowercase : Tuple = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_lowercase : str = torch.manual_seed(UpperCAmelCase_ )
else:
_lowercase : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_lowercase : Optional[int] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : Tuple = """cpu"""
_lowercase : List[str] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCAmelCase_ )
_lowercase : Union[str, Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : List[str] = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
_lowercase : Tuple = output.images[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowercase : int = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self ):
_lowercase : Dict = torch_device == """cpu"""
_lowercase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=UpperCAmelCase_ ,relax_max_difference=UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.get_dummy_components()
_lowercase : Optional[int] = self.pipeline_class(**UpperCAmelCase_ )
_lowercase : int = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Tuple = 1
_lowercase : Tuple = 2
_lowercase : Dict = self.get_dummy_inputs(UpperCAmelCase_ )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : List[str] = batch_size * [inputs[key]]
_lowercase : List[str] = pipe(**UpperCAmelCase_ ,num_images_per_prompt=UpperCAmelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
_lowercase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowercase : Any = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowercase : Dict = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
_lowercase : List[Any] = pipe(
"""a shark""" ,generator=UpperCAmelCase_ ,guidance_scale=15.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="""np""" ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCAmelCase_ ,UpperCAmelCase_ )
| 336 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (PNDMScheduler,)
SCREAMING_SNAKE_CASE_ : List[Any] = (("num_inference_steps", 5_0),)
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
_lowercase : int = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**UpperCAmelCase_ )
return config
def lowerCamelCase__ ( self ,UpperCAmelCase_=0 ,**UpperCAmelCase_ ):
_lowercase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowercase : Any = kwargs.pop("""num_inference_steps""" ,UpperCAmelCase_ )
_lowercase : Dict = self.dummy_sample
_lowercase : Union[str, Any] = 0.1 * sample
_lowercase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
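        # PNDM's multistep formulas consume a history of residuals, so prime it with dummy values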
for scheduler_class in self.scheduler_classes:
_lowercase : int = self.get_scheduler_config(**UpperCAmelCase_ )
_lowercase : Tuple = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
_lowercase : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
_lowercase : Optional[Any] = scheduler_class.from_pretrained(UpperCAmelCase_ )
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
_lowercase : Any = dummy_past_residuals[:]
_lowercase : int = scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : Union[str, Any] = new_scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowercase : int = scheduler.step_plms(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : int = new_scheduler.step_plms(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ,UpperCAmelCase_=0 ,**UpperCAmelCase_ ):
_lowercase : List[Any] = dict(self.forward_default_kwargs )
_lowercase : Dict = kwargs.pop("""num_inference_steps""" ,UpperCAmelCase_ )
_lowercase : Dict = self.dummy_sample
_lowercase : List[Any] = 0.1 * sample
_lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowercase : str = self.get_scheduler_config()
_lowercase : Dict = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_lowercase : List[str] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
_lowercase : List[str] = scheduler_class.from_pretrained(UpperCAmelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_lowercase : Any = dummy_past_residuals[:]
_lowercase : Union[str, Any] = scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : Union[str, Any] = new_scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowercase : Union[str, Any] = scheduler.step_plms(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : List[Any] = new_scheduler.step_plms(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
_lowercase : int = self.scheduler_classes[0]
_lowercase : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase_ )
_lowercase : Optional[int] = scheduler_class(**UpperCAmelCase_ )
_lowercase : Optional[int] = 10
_lowercase : Optional[Any] = self.dummy_model()
_lowercase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowercase : Optional[Any] = model(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowercase : List[str] = model(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : int = scheduler.step_plms(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ).prev_sample
return sample
def lowerCamelCase__ ( self ):
_lowercase : int = dict(self.forward_default_kwargs )
_lowercase : List[str] = kwargs.pop("""num_inference_steps""" ,UpperCAmelCase_ )
for scheduler_class in self.scheduler_classes:
_lowercase : str = self.get_scheduler_config()
_lowercase : str = scheduler_class(**UpperCAmelCase_ )
_lowercase : Union[str, Any] = self.dummy_sample
_lowercase : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase_ ,"""set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ ,"""set_timesteps""" ):
_lowercase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowercase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowercase : Optional[int] = dummy_past_residuals[:]
_lowercase : Optional[int] = scheduler.step_prk(UpperCAmelCase_ ,0 ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : Union[str, Any] = scheduler.step_prk(UpperCAmelCase_ ,1 ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
_lowercase : Tuple = scheduler.step_plms(UpperCAmelCase_ ,0 ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
_lowercase : Union[str, Any] = scheduler.step_plms(UpperCAmelCase_ ,1 ,UpperCAmelCase_ ,**UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def lowerCamelCase__ ( self ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase_ )
_lowercase : List[Any] = self.scheduler_classes[0]
_lowercase : Tuple = self.get_scheduler_config(steps_offset=1 )
_lowercase : Optional[int] = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) ,)
def lowerCamelCase__ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase_ ,beta_end=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
        # earlier version of set_timesteps() caused an error indexing alphas with inference steps as a power of 3
_lowercase : Optional[Any] = 27
for scheduler_class in self.scheduler_classes:
_lowercase : Dict = self.dummy_sample
_lowercase : Tuple = 0.1 * sample
_lowercase : str = self.get_scheduler_config()
_lowercase : List[str] = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowercase : List[Any] = scheduler.step_prk(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ).prev_sample
def lowerCamelCase__ ( self ):
with self.assertRaises(UpperCAmelCase_ ):
_lowercase : Optional[int] = self.scheduler_classes[0]
_lowercase : int = self.get_scheduler_config()
_lowercase : Tuple = scheduler_class(**UpperCAmelCase_ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = self.full_loop()
_lowercase : Any = torch.sum(torch.abs(UpperCAmelCase_ ) )
_lowercase : Optional[int] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
_lowercase : List[str] = torch.sum(torch.abs(UpperCAmelCase_ ) )
_lowercase : List[str] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def lowerCamelCase__ ( self ):
        # We specify a different beta so that the first alpha is 0.99
_lowercase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCAmelCase_ ,beta_start=0.01 )
_lowercase : Any = torch.sum(torch.abs(UpperCAmelCase_ ) )
_lowercase : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def lowerCamelCase__ ( self ):
        # We specify a different beta so that the first alpha is 0.99
_lowercase : str = self.full_loop(set_alpha_to_one=UpperCAmelCase_ ,beta_start=0.01 )
_lowercase : Optional[int] = torch.sum(torch.abs(UpperCAmelCase_ ) )
_lowercase : List[str] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 336 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
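        # per-pixel derivative products; summed over a window below, they form the entries of the structure tensor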
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
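                # Harris response: r = det(M) - k * trace(M)**2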
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 336 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase: Tuple = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase: Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_lowercase : Optional[int] = str(bin(__UpperCAmelCase ) )[2:] # remove the leading "0b"
_lowercase : Any = str(bin(__UpperCAmelCase ) )[2:]
_lowercase : str = max(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
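    # zero-pad both binary strings to the same width, then OR them character by character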
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCAmelCase ) , b_binary.zfill(__UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
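    # walk the file, tracking when we are inside the target class and test so only that occurrence is rewritten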
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 336 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 336 |
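# Hedged usage sketch for the classes exercised by the test above; requires flax
# and downloads the roberta-base checkpoint on first run.
import numpy as np
from transformers import FlaxRobertaModel, RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")

inputs = tokenizer("Hello world", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)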
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 336 | 1 |
"""simple docstring"""
def print_max_activities(start, finish):
    """Print a maximum-size set of mutually compatible activities.

    Assumes the activities are sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(1, n):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 336 |
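# A variant of the greedy selection above that returns the chosen indices
# instead of printing them; same assumption that `finish` is sorted.
def max_activities(start, finish):
    selected = [0]  # the first activity is always selected
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]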
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
    import torch

if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False,
        ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True,
        image_mean=None, image_std=None, **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC, data_format=None, **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images, do_resize=None, size=None, keep_aspect_ratio=None, ensure_multiple_of=None,
        resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None,
        image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 336 | 1 |
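# Self-contained walk-through of the size-snapping logic in
# get_resize_output_image_size above, with example numbers: a 480x640 image
# targeted at 384x384 with keep_aspect_ratio=True and multiple=32.
scale_height = 384 / 480  # 0.8
scale_width = 384 / 640   # 0.6
# |1 - 0.6| > |1 - 0.8|, so the height scale changes the image least ("fit height"):
scale_width = scale_height

new_height = round(scale_height * 480 / 32) * 32  # 384
new_width = round(scale_width * 640 / 32) * 32    # 512
print(new_height, new_width)  # -> 384 512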
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 336 |
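# Hedged sketch of the tool exercised by the test above; load_tool downloads the
# default text-classification checkpoint the first time it runs.
from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
print(classifier(text="That's quite cool", labels=["positive", "negative"]))  # "positive"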
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 336 | 1 |
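# The same two membership functions and a few of the operations above, as a
# hedged numpy-only sketch for readers without scikit-fuzzy installed.
import numpy as np

def trimf(x, abc):
    # triangular membership function with corners a <= b <= c
    a, b, c = abc
    return np.clip(np.minimum((x - a) / (b - a), (c - x) / (c - b)), 0, 1)

x = np.linspace(0, 75, 75)
young = trimf(x, [0, 25, 50])
middle_aged = trimf(x, [25, 50, 75])

union = np.maximum(young, middle_aged)         # max(µA(x), µB(x))
intersection = np.minimum(young, middle_aged)  # min(µA(x), µB(x))
complement_a = 1 - young                       # 1 - µA(x)
print(union.max(), intersection.max(), complement_a.min())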
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of an I-BERT model."""

    model_type = "ibert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        quant_mode=False, force_dequant="none", **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 336 |
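# Hedged usage sketch: instantiating the config above and a randomly
# initialized model from it (no download involved).
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True)  # enable integer-only quantization mode
model = IBertModel(config)
print(config.model_type, config.hidden_size, config.quant_mode)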
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 336 | 1 |
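# Hedged end-to-end sketch of the processor under test, using the released
# openai/clip-vit-base-patch32 checkpoint (downloaded on first run); the
# blank image is made up for illustration.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']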
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the received inputs and labels."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )

    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])
    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer),
    )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
| 336 |
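# The label-masking trick used by DataCollatorCTCWithPadding above, shown in
# isolation: padded label positions become -100 so the loss ignores them.
# A pad token id of 0 is assumed purely for illustration.
import torch

labels = torch.tensor([[5, 9, 2, 0, 0], [7, 1, 4, 8, 0]])
attention_mask = (labels != 0).long()
masked = labels.masked_fill(attention_mask.ne(1), -100)
print(masked)
# tensor([[   5,    9,    2, -100, -100],
#         [   7,    1,    4,    8, -100]])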
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))

        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100

        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )

        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result
            # in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
| 336 | 1 |
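# Hedged sketch of the public entry point this builder backs; requires pyspark
# and a local Spark session, and the toy DataFrame is made up.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])

ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello'}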
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 336 |
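# Hedged sketch of the progress-bar helpers re-exported above.
from datasets.utils import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled

disable_progress_bar()
assert not is_progress_bar_enabled()
enable_progress_bar()
assert is_progress_bar_enabled()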
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=12 ,UpperCAmelCase_=7 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=99 ,UpperCAmelCase_=32 ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=0 ,UpperCAmelCase_=None ,):
_lowercase : Dict = parent
_lowercase : int = batch_size
_lowercase : int = seq_length
_lowercase : Any = is_training
_lowercase : Dict = use_input_mask
_lowercase : str = use_labels
_lowercase : int = vocab_size
_lowercase : Tuple = hidden_size
_lowercase : Dict = projection_dim
_lowercase : Tuple = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : Any = intermediate_size
_lowercase : Dict = dropout
_lowercase : Tuple = attention_dropout
_lowercase : Dict = max_position_embeddings
_lowercase : List[str] = initializer_range
_lowercase : Optional[int] = scope
_lowercase : Optional[int] = bos_token_id
def lowerCamelCase__ ( self ):
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase : Optional[Any] = None
if self.use_input_mask:
_lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_lowercase : List[str] = input_mask.numpy()
_lowercase , _lowercase : Dict = input_mask.shape
_lowercase : Dict = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
_lowercase : int = 1
_lowercase : Union[str, Any] = 0
_lowercase : Any = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = TFBlipTextModel(config=UpperCAmelCase_ )
_lowercase : List[Any] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,training=UpperCAmelCase_ )
_lowercase : Tuple = model(UpperCAmelCase_ ,training=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (TFBlipTextModel,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self ):
_lowercase : Tuple = BlipTextModelTester(self )
_lowercase : int = ConfigTester(self ,config_class=UpperCAmelCase_ ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowerCamelCase__ ( self ):
pass
@slow
def lowerCamelCase__ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFBlipTextModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCAmelCase_ )
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
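    """Reverse the word order of a whitespace-separated string.

    >>> __SCREAMING_SNAKE_CASE("I hate it when I forget to say hi")
    'hi say to forget I when it hate I'
    """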
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : List[str] = 0
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : str = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
_lowercase : Dict = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(UpperCAmelCase_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(UpperCAmelCase_ ,"""w""" ) )
_lowercase : List[str] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Dict = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
_lowercase : int = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(UpperCAmelCase_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(UpperCAmelCase_ ,"""w""" ) )
_lowercase : List[str] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
_lowercase : Optional[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
_lowercase : Optional[int] = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(UpperCAmelCase_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(UpperCAmelCase_ ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_lowercase : List[Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
_lowercase : Union[str, Any] = CLIPImageProcessor(**UpperCAmelCase_ )
# save in new folder
model_config.save_pretrained(UpperCAmelCase_ )
config.save_pretrained(UpperCAmelCase_ )
_lowercase : Any = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
# make sure private variable is not incorrectly saved
_lowercase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : str = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(UpperCAmelCase_ ,"""w""" ) ,)
_lowercase : Tuple = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase_ ,"""clip-base is not a local folder and is not a valid model identifier""" ):
_lowercase : int = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase_ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_lowercase : Tuple = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ,revision="""aaaaaa""" )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
UpperCAmelCase_ ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase__ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
_lowercase : str = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
_lowercase : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=UpperCAmelCase_ )
_lowercase : List[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
_lowercase : List[Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ,trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def lowerCamelCase__ ( self ):
try:
AutoConfig.register("""custom""" ,UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ ,UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoImageProcessor.register(UpperCAmelCase_ ,UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Optional[int] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
_lowercase : Tuple = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(UpperCAmelCase_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(UpperCAmelCase_ ,"""w""" ) )
_lowercase : List[Any] = CustomImageProcessor.from_pretrained(UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
_lowercase : Optional[int] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self ):
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = True
try:
AutoConfig.register("""custom""" ,UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ ,UpperCAmelCase_ )
# If remote code is not set, the default is to use local
_lowercase : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_lowercase : int = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_lowercase : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCAmelCase_ ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 336 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = []
for line in lines:
_lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments
if line:
filtered_lines.append(__UpperCAmelCase )
_lowercase : Tuple = """\n""".join(__UpperCAmelCase )
# Make a hash from all this code
_lowercase : Tuple = full_str.encode("""utf-8""" )
return shaaaa(__UpperCAmelCase ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
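    # Example: "^BANANA|" has the 8 rotations "^BANANA|", "BANANA|^",
    # "ANANA|^B", ..., "|^BANANA" (one rotation per starting offset).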
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
    rotations.sort()  # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
UpperCAmelCase: Dict = numpy.array([0, 0])
UpperCAmelCase: Tuple = numpy.array([0.5, 0.8_660_254])
UpperCAmelCase: Any = numpy.array([1, 0])
UpperCAmelCase: Dict = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Tuple = initial_vectors
for _ in range(_UpperCAmelCase ):
_lowercase : Tuple = iteration_step(_UpperCAmelCase )
return vectors
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : List[Any] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowercase : Union[str, Any] = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
_lowercase : Union[str, Any] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : int = numpy.radians(_UpperCAmelCase )
_lowercase : Tuple = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
_lowercase : Union[str, Any] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : List[str] = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowercase : List[Any] = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase: Optional[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 350 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
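    # Example: in the descending row [4, 3, 2, -1] the first negative value
    # sits at index 3; an all-non-negative row such as [3, 2] returns
    # len(array), i.e. 2.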
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
        # Num must be negative and the value just before it non-negative: the boundary is found.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Dict = logging.get_logger(__name__)
UpperCAmelCase: Optional[int] = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class UpperCamelCase ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "funnel"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=[4, 4, 4] ,UpperCAmelCase_=None ,UpperCAmelCase_=2 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=64 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu_new" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=None ,UpperCAmelCase_=1E-9 ,UpperCAmelCase_="mean" ,UpperCAmelCase_="relative_shift" ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
_lowercase : int = vocab_size
_lowercase : List[str] = block_sizes
_lowercase : List[str] = [1] * len(__A ) if block_repeats is None else block_repeats
assert len(__A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_lowercase : Tuple = num_decoder_layers
_lowercase : Dict = d_model
_lowercase : str = n_head
_lowercase : Dict = d_head
_lowercase : Optional[int] = d_inner
_lowercase : int = hidden_act
_lowercase : List[Any] = hidden_dropout
_lowercase : int = attention_dropout
_lowercase : Optional[Any] = activation_dropout
_lowercase : List[Any] = initializer_range
_lowercase : int = initializer_std
_lowercase : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_lowercase : Dict = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_lowercase : str = attention_type
_lowercase : str = separate_cls
_lowercase : List[Any] = truncate_seq
_lowercase : Tuple = pool_q_only
super().__init__(**__A )
@property
def lowerCamelCase__ ( self ):
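        # e.g. the default block_sizes [4, 4, 4] sum to 12 hidden layers.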
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def lowerCamelCase__ ( self ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    _lowercase : str = re.sub("""<n>""" , """""" , __UpperCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowercase ) )
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase: Optional[Any] = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[str] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
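# Illustrative behavior: with the cosine transform and 1000 steps, betas start
# on the order of 1e-5 and increase monotonically toward the clip applied by
# min() above (0.999 by default).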
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase: List[str] = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class UpperCamelCase ( A__ ):
"""simple docstring"""
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
super().__init__(*__snake_case ,**__snake_case )
self.check_model_type(__snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
_lowercase , _lowercase : Tuple = {}, {}
if padding is not None:
_lowercase : str = padding
if truncation is not None:
_lowercase : List[str] = truncation
if top_k is not None:
_lowercase : Union[str, Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ):
if isinstance(__snake_case ,(Image.Image, str) ) and isinstance(__snake_case ,__snake_case ):
_lowercase : int = {"""image""": image, """question""": question}
else:
_lowercase : Tuple = image
_lowercase : str = super().__call__(__snake_case ,**__snake_case )
return results
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=False ,UpperCAmelCase_=False ):
_lowercase : int = load_image(inputs["""image"""] )
_lowercase : List[Any] = self.tokenizer(
inputs["""question"""] ,return_tensors=self.framework ,padding=__snake_case ,truncation=__snake_case )
_lowercase : str = self.image_processor(images=__snake_case ,return_tensors=self.framework )
model_inputs.update(__snake_case )
return model_inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.model(**__snake_case )
return model_outputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=5 ):
if top_k > self.model.config.num_labels:
_lowercase : Any = self.model.config.num_labels
if self.framework == "pt":
_lowercase : Dict = model_outputs.logits.sigmoid()[0]
_lowercase , _lowercase : Optional[Any] = probs.topk(__snake_case )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
_lowercase : Optional[int] = scores.tolist()
_lowercase : Optional[int] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__snake_case ,__snake_case )]
| 353 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float = 1 / sqrt(2 ) ):
_lowercase : Dict = tau * frequency / samplerate
_lowercase : Union[str, Any] = sin(lowerCamelCase__ )
_lowercase : str = cos(lowerCamelCase__ )
_lowercase : Optional[int] = _sin / (2 * q_factor)
_lowercase : Dict = (1 - _cos) / 2
_lowercase : Optional[int] = 1 - _cos
_lowercase : List[Any] = 1 + alpha
_lowercase : str = -2 * _cos
_lowercase : Optional[int] = 1 - alpha
_lowercase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
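# Note: this and the makers below follow the standard second-order biquad
# ("Audio EQ Cookbook") designs; e.g. (frequency=1_000, samplerate=48_000)
# builds a 1 kHz filter for 48 kHz audio and returns an IIRFilter(2) with its
# coefficients already set.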
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float = 1 / sqrt(2 ) ):
_lowercase : Union[str, Any] = tau * frequency / samplerate
_lowercase : str = sin(lowerCamelCase__ )
_lowercase : str = cos(lowerCamelCase__ )
_lowercase : Union[str, Any] = _sin / (2 * q_factor)
_lowercase : str = (1 + _cos) / 2
_lowercase : Optional[int] = -1 - _cos
_lowercase : Optional[Any] = 1 + alpha
_lowercase : int = -2 * _cos
_lowercase : Tuple = 1 - alpha
_lowercase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float = 1 / sqrt(2 ) ):
_lowercase : Optional[Any] = tau * frequency / samplerate
_lowercase : List[str] = sin(lowerCamelCase__ )
_lowercase : str = cos(lowerCamelCase__ )
_lowercase : Optional[Any] = _sin / (2 * q_factor)
_lowercase : List[str] = _sin / 2
_lowercase : Optional[int] = 0
_lowercase : List[str] = -ba
_lowercase : Optional[Any] = 1 + alpha
_lowercase : Optional[int] = -2 * _cos
_lowercase : Tuple = 1 - alpha
_lowercase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float = 1 / sqrt(2 ) ):
_lowercase : Dict = tau * frequency / samplerate
_lowercase : List[Any] = sin(lowerCamelCase__ )
_lowercase : str = cos(lowerCamelCase__ )
_lowercase : Optional[int] = _sin / (2 * q_factor)
_lowercase : Optional[Any] = 1 - alpha
_lowercase : List[str] = -2 * _cos
_lowercase : Tuple = 1 + alpha
_lowercase : Any = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : float = 1 / sqrt(2 ) , ):
_lowercase : str = tau * frequency / samplerate
_lowercase : str = sin(lowerCamelCase__ )
_lowercase : Union[str, Any] = cos(lowerCamelCase__ )
_lowercase : Any = _sin / (2 * q_factor)
_lowercase : Tuple = 10 ** (gain_db / 40)
_lowercase : List[str] = 1 + alpha * big_a
_lowercase : str = -2 * _cos
_lowercase : Optional[int] = 1 - alpha * big_a
_lowercase : List[Any] = 1 + alpha / big_a
_lowercase : Dict = -2 * _cos
_lowercase : Optional[Any] = 1 - alpha / big_a
_lowercase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : float = 1 / sqrt(2 ) , ):
_lowercase : Any = tau * frequency / samplerate
_lowercase : Any = sin(lowerCamelCase__ )
_lowercase : Any = cos(lowerCamelCase__ )
_lowercase : str = _sin / (2 * q_factor)
_lowercase : Tuple = 10 ** (gain_db / 40)
_lowercase : List[Any] = (big_a + 1) - (big_a - 1) * _cos
_lowercase : int = (big_a + 1) + (big_a - 1) * _cos
_lowercase : Dict = (big_a - 1) - (big_a + 1) * _cos
_lowercase : str = (big_a - 1) + (big_a + 1) * _cos
_lowercase : List[str] = 2 * sqrt(lowerCamelCase__ ) * alpha
_lowercase : Optional[Any] = big_a * (pmc + aaa)
_lowercase : Any = 2 * big_a * mpc
_lowercase : int = big_a * (pmc - aaa)
_lowercase : Optional[int] = ppmc + aaa
_lowercase : int = -2 * pmpc
_lowercase : Tuple = ppmc - aaa
_lowercase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : float = 1 / sqrt(2 ) , ):
_lowercase : Any = tau * frequency / samplerate
_lowercase : int = sin(lowerCamelCase__ )
_lowercase : str = cos(lowerCamelCase__ )
_lowercase : Tuple = _sin / (2 * q_factor)
_lowercase : Any = 10 ** (gain_db / 40)
_lowercase : List[str] = (big_a + 1) - (big_a - 1) * _cos
_lowercase : Any = (big_a + 1) + (big_a - 1) * _cos
_lowercase : Tuple = (big_a - 1) - (big_a + 1) * _cos
_lowercase : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
_lowercase : Any = 2 * sqrt(lowerCamelCase__ ) * alpha
_lowercase : Optional[int] = big_a * (ppmc + aaa)
_lowercase : Optional[Any] = -2 * big_a * pmpc
_lowercase : Union[str, Any] = big_a * (ppmc - aaa)
_lowercase : Tuple = pmc + aaa
_lowercase : List[str] = 2 * mpc
_lowercase : Optional[Any] = pmc - aaa
_lowercase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
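    # Example: "^BANANA|" has the 8 rotations "^BANANA|", "BANANA|^",
    # "ANANA|^B", ..., "|^BANANA" (one rotation per starting offset).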
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
    rotations.sort()  # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
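# Round-trip example (added): the transform and its inverse defined above reconstruct
# the input exactly.
# >>> result = bwt_transform("banana")
# >>> result["bwt_string"], result["idx_original_string"]
# ('nnbaaa', 3)
# >>> reverse_bwt(result["bwt_string"], result["idx_original_string"])
# 'banana'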
| 336 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
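# Usage note (added): this builder backs `load_dataset("json", ...)`. A minimal
# end-to-end call, assuming a local JSON-lines file exists:
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#
# For a single JSON document whose records live under one key, pass `field`:
#
#   ds = load_dataset("json", data_files="data.json", field="records", split="train")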
| 355 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
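# Worked example (added): both implementations agree on a fixed input.
# >>> triplet_sum1([13, 29, 7, 23, 5], 35)
# (5, 7, 23)
# >>> triplet_sum2([13, 29, 7, 23, 5], 35)
# (5, 7, 23)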
| 336 | 0 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
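# Worked example (added): a 2x2 window with stride 2 reduces each quadrant of a
# 4x4 matrix to one value.
# >>> arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# >>> maxpooling(arr, size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])
# >>> avgpooling(arr, size=2, stride=2)
# array([[ 3.,  5.],
#        [11., 13.]])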
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwritten to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwritten to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
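# Usage sketch (added; the checkpoint name is illustrative): how this processor is
# typically driven once instantiated from a pretrained repo.
#
#   from PIL import Image
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.open("photo.jpg"), text="What is in the image?", return_tensors="pt")
#   # `inputs` now holds input_ids/attention_mask, qformer_input_ids/qformer_attention_mask
#   # and pixel_values, ready to be passed to an InstructBLIP model's generate().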
| 336 | 0 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
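# Examples (added): the recursion stops as soon as a full pass makes no swap.
# >>> bubble_sort([0, 5, 2, 3, 2])
# [0, 2, 2, 3, 5]
# >>> bubble_sort([])
# []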
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
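# Usage sketch (added): instantiated like any PretrainedConfig subclass; attribute_map
# lets generic code read `hidden_size` while the stored field is `n_embd`.
#
#   config = TrajectoryTransformerConfig(n_layer=6)
#   assert config.n_layer == 6 and config.hidden_size == config.n_embd == 128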
| 336 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
if "ckpt" in self._tf_checkpoint.lower():
_lowercase : Optional[int] = self._tf_checkpoint
_lowercase : List[str] = """"""
else:
_lowercase : Any = self._tf_checkpoint
_lowercase : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
_UpperCamelCase ,self._config ,self._pytorch_dump_output ,_UpperCamelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
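# Usage sketch (added): composing a full InstructBLIP config from its sub-configs; the
# constructor ties the Q-Former's encoder width to the vision tower's hidden size.
#
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size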
| 336 | 0 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCAmelCase: Dict = """__DUMMY_TRANSFORMERS_USER__"""
UpperCAmelCase: str = """Dummy User"""
UpperCAmelCase: Optional[int] = """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"""
UpperCAmelCase: Any = """https://hub-ci.huggingface.co"""
UpperCAmelCase: Any = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
UpperCAmelCase: Optional[Any] = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
UpperCAmelCase: Optional[Any] = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
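# Usage sketch (added): a test requests the wrapper fixture and loads the private repo
# through the CI hub endpoint (the auth kwarg name varies across datasets versions).
#
#   def test_load_private_txt(hf_private_dataset_repo_txt_data, hf_token):
#       import datasets
#       ds = datasets.load_dataset(hf_private_dataset_repo_txt_data, token=hf_token)
#       assert "train" in ds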
| 359 |
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
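# Quick check (added): on a synthetic image corners should appear near the square's
# vertices; `r > 0.5` in detect() is the tunable response threshold.
#
#   canvas = np.zeros((64, 64), dtype=np.uint8)
#   canvas[16:48, 16:48] = 255
#   cv2.imwrite("square.png", canvas)
#   _, corners = HarrisCorner(0.04, 3).detect("square.png")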
| 336 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MobileBertTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = filter_non_english
SCREAMING_SNAKE_CASE_ : List[str] = "google/mobilebert-uncased"
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowercase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_lowercase : Optional[int] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = "UNwant\u00E9d,running"
_lowercase : Optional[int] = "unwanted, running"
return input_text, output_text
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = self.tokenizer_class(self.vocab_file )
_lowercase : Optional[int] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,[9, 6, 7, 12, 10, 11] )
def lowerCamelCase__ ( self ):
if not self.test_rust_tokenizer:
return
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : Optional[int] = self.get_rust_tokenizer()
_lowercase : List[str] = "UNwant\u00E9d,running"
_lowercase : str = tokenizer.tokenize(snake_case__ )
_lowercase : Optional[int] = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
_lowercase : str = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : List[str] = rust_tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
_lowercase : int = self.get_rust_tokenizer()
_lowercase : str = tokenizer.encode(snake_case__ )
_lowercase : Optional[Any] = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# With lower casing
_lowercase : Optional[int] = self.get_tokenizer(do_lower_case=snake_case__ )
_lowercase : Optional[int] = self.get_rust_tokenizer(do_lower_case=snake_case__ )
_lowercase : Any = "UNwant\u00E9d,running"
_lowercase : str = tokenizer.tokenize(snake_case__ )
_lowercase : Optional[Any] = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
_lowercase : Dict = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : List[str] = rust_tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
_lowercase : str = self.get_rust_tokenizer()
_lowercase : List[str] = tokenizer.encode(snake_case__ )
_lowercase : Optional[Any] = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self ):
_lowercase : Dict = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
_lowercase : Any = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = BasicTokenizer(do_lower_case=snake_case__ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self ):
_lowercase : Any = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_lowercase : Optional[int] = {}
for i, token in enumerate(snake_case__ ):
_lowercase : Optional[int] = i
_lowercase : Optional[Any] = WordpieceTokenizer(vocab=snake_case__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase__ ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(snake_case__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=snake_case__ )
_lowercase : Union[str, Any] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=snake_case__ )
_lowercase : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case__ ,snake_case__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : Dict = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
_lowercase : Tuple = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_lowercase : str = tokenizer_r.encode_plus(
snake_case__ ,return_attention_mask=snake_case__ ,return_token_type_ids=snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ ,)
_lowercase : Dict = tokenizer_r.do_lower_case if hasattr(snake_case__ ,"""do_lower_case""" ) else False
_lowercase : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def lowerCamelCase__ ( self ):
_lowercase : str = ["的", "人", "有"]
_lowercase : Dict = "".join(snake_case__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : str = True
_lowercase : Dict = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
_lowercase : Tuple = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : int = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : List[str] = tokenizer_r.convert_ids_to_tokens(snake_case__ )
_lowercase : List[Any] = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
_lowercase : List[str] = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
_lowercase : int = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : Optional[Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
_lowercase : Any = tokenizer_r.convert_ids_to_tokens(snake_case__ )
_lowercase : List[Any] = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that only the first Chinese character is not preceded by "##".
_lowercase : Optional[int] = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case__ )
]
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
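# Usage sketch (added): the behaviour pinned down above, driven directly.
#
#   tok = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
#   ids = tok.encode("sequence builders")  # wrapped as [CLS] ... [SEP] (ids 101/102)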
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
"""simple docstring"""
UpperCAmelCase: Optional[Any] = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of each unit's conversion factor, relative to the meter
UpperCAmelCase: Union[str, Any] = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[str] = from_type.lower().strip("""s""" )
_lowercase : Union[str, Any] = to_type.lower().strip("""s""" )
_lowercase : Optional[Any] = UNIT_SYMBOL.get(lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase : Tuple = UNIT_SYMBOL.get(lowerCAmelCase__ , lowerCAmelCase__ )
if from_sanitized not in METRIC_CONVERSION:
_lowercase : Dict = (
F"""Invalid \'from_type\' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase__ )}"""
)
raise ValueError(lowerCAmelCase__ )
if to_sanitized not in METRIC_CONVERSION:
_lowercase : Dict = (
F"""Invalid \'to_type\' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase__ )}"""
)
raise ValueError(lowerCAmelCase__ )
_lowercase : Optional[int] = METRIC_CONVERSION[from_sanitized]
_lowercase : List[str] = METRIC_CONVERSION[to_sanitized]
_lowercase : Union[str, Any] = 1
if from_exponent > to_exponent:
_lowercase : Optional[int] = from_exponent - to_exponent
else:
_lowercase : int = -(to_exponent - from_exponent)
return value * pow(10 , lowerCAmelCase__ )
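# Worked example (a sketch; the converter above is the obfuscated
# __SCREAMING_SNAKE_CASE, likely length_conversion upstream): converting
# 4 "kilometer" to "meter" resolves km -> exponent 3 and m -> exponent 0,
# so the result is 4 * 10**(3 - 0) = 4000.0.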
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase: Union[str, Any] = """true"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=82 , __UpperCAmelCase=16 ):
set_seed(42 )
_lowercase : Dict = RegressionModel()
_lowercase : Dict = deepcopy(lowercase__ )
_lowercase : Tuple = RegressionDataset(length=lowercase__ )
_lowercase : Optional[Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
_lowercase : Union[str, Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=False ):
_lowercase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
_lowercase : List[Any] = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(__UpperCAmelCase ):
_lowercase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
_lowercase : List[str] = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
_lowercase : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase ):
if use_longest:
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
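# Note (a sketch of intent): use_longest=True pads each batch only to its longest
# member, so batch shapes vary from step to step, while max_length padding yields
# fixed (16, 128) shapes; the variable-shape path is what gather_for_metrics must
# handle in the tests below.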
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Dict = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
_lowercase : List[Any] = get_dataloader(lowercase__ , not dispatch_batches )
_lowercase : str = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowercase__ )
_lowercase : int = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Dict = []
for batch in dataloader:
_lowercase : str = batch.values()
with torch.no_grad():
_lowercase : List[Any] = model(lowercase__ )
_lowercase : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_lowercase : Any = [], []
for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
_lowercase : str = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=82 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=16 ):
_lowercase : List[str] = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
_lowercase : Any = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = False , __UpperCAmelCase = False ):
_lowercase : Tuple = evaluate.load("""glue""" , """mrpc""" )
_lowercase : Any = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
_lowercase : Any = setup['no']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
_lowercase : List[str] = model(**lowercase__ )
_lowercase : str = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch["""labels"""] )
_lowercase : List[Any] = metric.compute()
# Then do distributed
_lowercase : Dict = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
_lowercase : Any = model(**lowercase__ )
_lowercase : Optional[Any] = outputs.logits.argmax(dim=-1 )
_lowercase : Any = batch['labels']
_lowercase : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
_lowercase : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
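# Interpretation (sketch): gather_for_metrics drops the duplicated samples that
# distributed samplers add to round out the last batch, so the distributed "ddp"
# accuracy and F1 above must match the single-process "no" baseline.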
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_lowercase : List[str] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
_lowercase : Optional[int] = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 362 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ : List[str] = frozenset([] )
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowercase_ ,)
_lowercase : int = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
_lowercase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
_lowercase : str = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="""gelu""" ,projection_dim=5_12 ,)
_lowercase : List[str] = CLIPTextModel(lowercase_ )
_lowercase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowercase : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ):
_lowercase : str = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowercase_ ) ).to(lowercase_ )
_lowercase : List[str] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_lowercase : Union[str, Any] = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((64, 64) )
_lowercase : List[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(lowercase_ ).startswith("""mps""" ):
_lowercase : int = torch.manual_seed(lowercase_ )
else:
_lowercase : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
_lowercase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowercase : str = self.get_dummy_components()
_lowercase : List[Any] = StableDiffusionInpaintPipeline(**lowercase_ )
_lowercase : Any = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
_lowercase : Tuple = self.get_dummy_inputs(lowercase_ )
_lowercase : Optional[Any] = sd_pipe(**lowercase_ ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : str = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowercase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
_lowercase : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
_lowercase : Any = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ ,safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
_lowercase : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowercase : Tuple = torch.manual_seed(0 )
_lowercase : Dict = pipe(
prompt=lowercase_ ,image=lowercase_ ,mask_image=lowercase_ ,generator=lowercase_ ,output_type="""np""" ,)
_lowercase : Optional[int] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase__ ( self ):
_lowercase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowercase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowercase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
_lowercase : Optional[Any] = """stabilityai/stable-diffusion-2-inpainting"""
_lowercase : Dict = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ ,torch_dtype=torch.floataa ,safety_checker=lowercase_ ,)
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
_lowercase : str = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowercase : int = torch.manual_seed(0 )
_lowercase : Tuple = pipe(
prompt=lowercase_ ,image=lowercase_ ,mask_image=lowercase_ ,generator=lowercase_ ,output_type="""np""" ,)
_lowercase : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowercase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowercase : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
_lowercase : Dict = PNDMScheduler.from_pretrained(lowercase_ ,subfolder="""scheduler""" )
_lowercase : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ ,safety_checker=lowercase_ ,scheduler=lowercase_ ,torch_dtype=torch.floataa ,)
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowercase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowercase : Any = torch.manual_seed(0 )
_lowercase : List[str] = pipe(
prompt=lowercase_ ,image=lowercase_ ,mask_image=lowercase_ ,generator=lowercase_ ,num_inference_steps=2 ,output_type="""np""" ,)
_lowercase : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
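# Worked example (a sketch; multiple=32 is a hypothetical setting): for an input
# of (height, width) = (480, 640) with output_size=384 and keep_aspect_ratio=True,
# scale_height = 0.8 and scale_width = 0.6; fitting height keeps scale 0.8, so
# get_resize_output_image_size returns (384, 512) after rounding to multiples.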
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
        _lowercase : int = [to_numpy_array(image ) for image in images]
        if do_resize:
            _lowercase : Union[str, Any] = [self.resize(image=image ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
        if do_rescale:
            _lowercase : int = [self.rescale(image=image ,scale=UpperCAmelCase_ ) for image in images]
        if do_normalize:
            _lowercase : str = [self.normalize(image=image ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
        _lowercase : Tuple = [to_channel_dimension_format(image ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336 | 0 |
"""simple docstring"""
from math import ceil, sqrt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 1000000 ):
_lowercase : Any = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_lowercase : Any = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_lowercase : List[str] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
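# Sketch of the counting: a square lamina with outer width w and hole width h uses
# w*w - h*h tiles, where w and h share parity and h >= 1; for w=3 the only option
# is h=1 (8 tiles), matching (3 - 1 - 2) // 2 + 1 = 1 lamina counted above.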
if __name__ == "__main__":
print(F'{solution() = }')
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
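# Worked point (sketch): at x = 25, young(25) = 1.0 and middle_aged(25) = 0.0, so
# union = 1.0, intersection = 0.0, the complement of young = 0.0, and the
# algebraic sum = 1.0 + 0.0 - 1.0 * 0.0 = 1.0.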
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase: Dict = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Dict = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: int = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[Any] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCAmelCase: Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
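# Sketch of the resulting behavior (standard _LazyModule semantics): importing
# this package is cheap, and a name such as WhisperForConditionalGeneration is
# only resolved, with torch only imported, when the attribute is first accessed.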
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        _lowercase : Tuple = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCAmelCase: Optional[int] = TypeVar("""T""")
class UpperCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ = True ):
_lowercase : dict[T, list[T]] = {} # dictionary of lists
_lowercase : Dict = directed
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__ )
self.adj_list[destination_vertex].append(lowerCAmelCase__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__ )
_lowercase : Union[str, Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase__ )
_lowercase : List[Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
_lowercase : List[Any] = [destination_vertex]
_lowercase : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__ )
_lowercase : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_lowercase : Optional[Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
_lowercase : List[Any] = [destination_vertex]
_lowercase : List[str] = []
return self
def __repr__( self ):
return pformat(self.adj_list )
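# Usage sketch (names as this dump renders them; upstream this is likely a
# GraphAdjacencyList with an add_edge method):
# g = UpperCamelCase(directed=False)
# g.lowerCamelCase__(1, 2).lowerCamelCase__(2, 3)  # chaining works, it returns self
# print(g)  # {1: [2], 2: [1, 3], 3: [2]}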
| 367 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
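# Sketch: the generator returned above yields ("<partition_id>_<row_id>", row_dict)
# pairs, e.g. ("3_0", {...}) for the first row of partition 3, walking partitions
# in the order given by partition_order.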
class UpperCamelCase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class UpperCamelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
            _lowercase , _lowercase , _lowercase , _lowercase : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
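        # Hand-resolved example of the TTTTT/SSSSS/NNNNN template used above,
        # assuming a hypothetical fpath "data-train-TTTTT-SSSSS-of-NNNNN.arrow"
        # with task_id=2, shard_id=0, global_shard_id=7, total_shards=16:
        #   per-task shard name: "data-train-00002-00000-of-NNNNN.arrow"
        #   final shard name:    "data-train-00007-of-00016.arrow"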
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
| 336 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = ['prompt']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['prompt', 'negative_prompt']
SCREAMING_SNAKE_CASE_ : Tuple = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ : Optional[int] = False
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
return 1_00
@property
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Union[str, Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
_lowercase : Any = PriorTransformer(**_SCREAMING_SNAKE_CASE )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
_lowercase : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=2_24 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
_lowercase : List[Any] = CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPImageProcessor(
crop_size=2_24 ,do_center_crop=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,image_mean=[0.48145466, 0.4578275, 0.40821073] ,image_std=[0.26862954, 0.26130258, 0.27577711] ,resample=3 ,size=2_24 ,)
return image_processor
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.dummy_prior
_lowercase : Optional[Any] = self.dummy_image_encoder
_lowercase : Union[str, Any] = self.dummy_text_encoder
_lowercase : Tuple = self.dummy_tokenizer
_lowercase : str = self.dummy_image_processor
_lowercase : int = UnCLIPScheduler(
variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=10_00 ,clip_sample=_SCREAMING_SNAKE_CASE ,clip_sample_range=10.0 ,)
_lowercase : List[str] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
_lowercase : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_lowercase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
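        # Note on the branch above: at the time of writing, torch.Generator
        # could not be created on the "mps" device, so MPS runs fall back to
        # seeding the global (CPU) generator with torch.manual_seed.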
_lowercase : Optional[Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : int = """cpu"""
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Dict = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowercase : Any = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowercase : str = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
_lowercase : List[str] = output.image_embeds
_lowercase : Dict = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
_lowercase : Optional[int] = image[0, -10:]
_lowercase : Optional[int] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_lowercase : Optional[int] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase__ ( self ):
_lowercase : List[str] = torch_device == """cpu"""
_lowercase : Optional[Any] = True
_lowercase : int = False
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,test_mean_pixel_difference=_SCREAMING_SNAKE_CASE ,)
@skip_mps
def lowerCamelCase__ ( self ):
_lowercase : str = torch_device == """cpu"""
_lowercase : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE ,test_mean_pixel_difference=_SCREAMING_SNAKE_CASE ,)
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
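        # XLNet appends its special tokens: id 4 is <sep> and id 3 is <cls>, so
        # a single sequence encodes as "tokens <sep> <cls>" and a pair as
        # "tokens_a <sep> tokens_b <sep> <cls>", matching the asserts above.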
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""" )
    _lowercase : Optional[Any] = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
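# (Standard torus surface area 4 * pi**2 * R * r with R = torus_radius and
# r = tube_radius; the check above rejects spindle/self-intersecting tori
# where r > R.)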
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
    if side_a < 0 or side_b < 0 or side_c < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
    elif side_a + side_b < side_c or side_a + side_c < side_b or side_b + side_c < side_a:
        raise ValueError("""Given three sides do not form a triangle""" )
    _lowercase : int = (side_a + side_b + side_c) / 2
    _lowercase : List[str] = sqrt(
        semi_perimeter
        * (semi_perimeter - side_a)
        * (semi_perimeter - side_b)
        * (semi_perimeter - side_c) )
return area
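# Worked Heron example: sides 5, 12, 13 give semi_perimeter = 15 and
# area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0, matching the 5-12-13 right
# triangle with legs 5 and 12.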
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
    if base_a < 0 or base_b < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""" )
    return 1 / 2 * (base_a + base_b) * height
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""" )
    return 1 / 2 * diagonal_a * diagonal_b
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
    if not isinstance(sides , int ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print("""\nSurface Areas of various geometric shapes: \n""")
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 369 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( __UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__SCREAMING_SNAKE_CASE ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Dict = all_rotations(__SCREAMING_SNAKE_CASE )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__SCREAMING_SNAKE_CASE ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : int = int(__SCREAMING_SNAKE_CASE )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : str = [""""""] * len(__SCREAMING_SNAKE_CASE )
for _ in range(len(__SCREAMING_SNAKE_CASE ) ):
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
_lowercase : Any = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
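# Worked example, computed by hand for the classic input "^BANANA|": the sorted
# rotations' last column spells "BNN^AA|A" and the original string sits at
# sorted index 6, so:
#   bwt_transform("^BANANA|") == {"bwt_string": "BNN^AA|A", "idx_original_string": 6}
#   reverse_bwt("BNN^AA|A", 6) == "^BANANA|"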
if __name__ == "__main__":
UpperCAmelCase: str = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: Optional[Any] = input(entry_msg).strip()
UpperCAmelCase: Dict = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: str = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = []
for line in lines:
_lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments
if line:
filtered_lines.append(__UpperCAmelCase )
_lowercase : Tuple = """\n""".join(__UpperCAmelCase )
# Make a hash from all this code
_lowercase : Tuple = full_str.encode("""utf-8""" )
return shaaaa(__UpperCAmelCase ).hexdigest()
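# e.g. hashing ["x = 1  # set x", "", "print(x)"] strips the comment, drops the
# blank line, and digests "x = 1  \nprint(x)" (trailing spaces survive the
# comment stripping).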
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: Dict = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "xlm-prophetnet"
SCREAMING_SNAKE_CASE_ : Any = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Any = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = "gelu" ,UpperCAmelCase_ = 3_05_22 ,UpperCAmelCase_ = 10_24 ,UpperCAmelCase_ = 40_96 ,UpperCAmelCase_ = 12 ,UpperCAmelCase_ = 16 ,UpperCAmelCase_ = 40_96 ,UpperCAmelCase_ = 12 ,UpperCAmelCase_ = 16 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 5_12 ,UpperCAmelCase_ = 0.02 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 1_28 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 0.0 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = 2 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Any = num_encoder_layers
_lowercase : int = num_encoder_attention_heads
_lowercase : Tuple = decoder_ffn_dim
_lowercase : List[str] = num_decoder_layers
_lowercase : List[str] = num_decoder_attention_heads
_lowercase : List[Any] = max_position_embeddings
_lowercase : int = init_std # Normal(0, this parameter)
_lowercase : Tuple = activation_function
# parameters for xlmprophetnet
_lowercase : Optional[Any] = ngram
_lowercase : str = num_buckets
_lowercase : str = relative_max_distance
_lowercase : Union[str, Any] = disable_ngram_loss
_lowercase : List[Any] = eps
# 3 Types of Dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : str = activation_dropout
_lowercase : Any = dropout
_lowercase : Tuple = use_cache
super().__init__(
pad_token_id=lowercase_ ,bos_token_id=lowercase_ ,eos_token_id=lowercase_ ,is_encoder_decoder=lowercase_ ,add_cross_attention=lowercase_ ,decoder_start_token_id=lowercase_ ,**lowercase_ ,)
@property
def lowerCamelCase__ ( self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCAmelCase: Dict = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class UpperCamelCase ( __lowercase ):
"""simple docstring"""
def __init__( self ,**UpperCAmelCase_ ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
return super().__call__(_a ,**_a )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Optional[int] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_lowercase : Dict = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,UpperCAmelCase_="This is a sound of {}." ):
if isinstance(_a ,_a ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowercase : str = requests.get(_a ).content
else:
with open(_a ,"""rb""" ) as f:
_lowercase : Tuple = f.read()
if isinstance(_a ,_a ):
_lowercase : int = ffmpeg_read(_a ,self.feature_extractor.sampling_rate )
if not isinstance(_a ,np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
_lowercase : Dict = self.feature_extractor(
[audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors="""pt""" )
_lowercase : Tuple = candidate_labels
_lowercase : Optional[int] = [hypothesis_template.format(_a ) for x in candidate_labels]
_lowercase : List[Any] = self.tokenizer(_a ,return_tensors=self.framework ,padding=_a )
_lowercase : Tuple = [text_inputs]
return inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = model_inputs.pop("""candidate_labels""" )
_lowercase : Dict = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,_a ):
_lowercase : Any = text_inputs[0]
else:
# Batching case.
_lowercase : Any = text_inputs[0][0]
_lowercase : Tuple = self.model(**_a ,**_a )
_lowercase : Union[str, Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Any = model_outputs.pop("""candidate_labels""" )
_lowercase : str = model_outputs["""logits"""][0]
if self.framework == "pt":
_lowercase : Any = logits.softmax(dim=0 )
_lowercase : Any = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
_lowercase : Optional[Any] = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(_a ,_a ) ,key=lambda UpperCAmelCase_ : -x[0] )
]
return result
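# Hedged usage sketch (the checkpoint name and audio file are assumptions, not
# pinned by this module):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> [{"score": ..., "label": "Sound of a dog"}, {"score": ..., "label": "Sound of vacuum cleaner"}]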
| 350 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
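# e.g. find_negative_index([4, 2, 0, -1, -3]) == 3: the binary search locates
# the first negative value in a descending row, so len(row) - 3 == 2 entries of
# that row are negative.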
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = a.name
_lowercase : Any = b.name
_lowercase : List[str] = ""
_lowercase : Dict = ""
_lowercase : List[Any] = a == b
_lowercase : List[Any] = name_a
_lowercase : Any = name_b
return res
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__lowerCamelCase , __lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __lowerCamelCase , __lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __lowerCamelCase , __lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __lowerCamelCase , __lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
for n in graph_proto.node:
_node_replace_input_with(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Tuple = list(model.graph.initializer )
_lowercase : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_lowercase : List[str] = inits[i].name
_lowercase : Union[str, Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __lowerCamelCase , __lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : str = os.path.dirname(__lowerCamelCase )
_lowercase : Dict = os.path.basename(__lowerCamelCase )
_lowercase : Union[str, Any] = onnx.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowercase : Dict = list(model.graph.initializer )
_lowercase : Any = set()
_lowercase : List[Any] = {}
_lowercase : Union[str, Any] = []
_lowercase : str = 0
for i in range(len(__lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__lowerCamelCase )
dup_set.add(__lowerCamelCase )
_lowercase : Optional[int] = inits[j].data_type
_lowercase : int = numpy.prod(inits[j].dims )
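                    # ONNX TensorProto dtype codes: 1 = FLOAT and 6 = INT32 are
                    # 4 bytes per element; 7 = INT64 and 11 = DOUBLE are 8.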
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , __lowerCamelCase )
total_reduced_size += mem_size
_lowercase : Optional[Any] = inits[i].name
_lowercase : Dict = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__lowerCamelCase )
else:
_lowercase : Optional[Any] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
_lowercase : str = sorted(__lowerCamelCase )
_remove_dup_initializers_from_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_lowercase : List[Any] = "optimized_" + model_file_name
_lowercase : int = os.path.join(__lowerCamelCase , __lowerCamelCase )
onnx.save(__lowerCamelCase , __lowerCamelCase )
return new_model
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 0 |
"""simple docstring"""
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = n
_lowercase : Dict = [None] * self.n
_lowercase : List[str] = 0 # index of the first element
_lowercase : Optional[int] = 0
_lowercase : Tuple = 0
def __len__( self ):
return self.size
def lowerCamelCase__ ( self ):
return self.size == 0
def lowerCamelCase__ ( self ):
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
_lowercase : Tuple = data
_lowercase : Any = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ):
if self.size == 0:
raise Exception("""UNDERFLOW""" )
_lowercase : int = self.array[self.front]
_lowercase : Union[str, Any] = None
_lowercase : Tuple = (self.front + 1) % self.n
self.size -= 1
return temp
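    # Index bookkeeping sketch for the ring buffer above (capacity n = 3,
    # following the intended array/front/rear semantics):
    #   start:           front=0, rear=0, size=0
    #   enqueue(10):     array=[10, None, None], rear=(0+1)%3=1, size=1
    #   enqueue(20):     array=[10, 20, None],   rear=(1+1)%3=2, size=2
    #   dequeue() -> 10: array=[None, 20, None], front=(0+1)%3=1, size=1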
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
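# For reference, the cosine option above implements
#   beta_t = min(1 - alpha_bar((t + 1) / T) / alpha_bar(t / T), max_beta)
# with alpha_bar(s) = cos((s + 0.008) / 1.008 * pi / 2) ** 2, the schedule of
# Nichol & Dhariwal (2021); the "exp" option swaps in alpha_bar(s) = exp(-12 * s).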
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
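        # Dividing by (sigma**2 + 1) ** 0.5 is the c_in preconditioning used by
        # k-diffusion (Karras et al., 2022, with sigma_data = 1): it keeps the
        # model input at roughly unit variance across noise levels.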
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 | 0 |
"""simple docstring"""
UpperCAmelCase: Dict = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 353 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 4_2
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = True
@register_to_config
def __init__( self ,UpperCAmelCase_ = 3 ,UpperCAmelCase_ = 3 ,UpperCAmelCase_ = ("DownEncoderBlock2D",) ,UpperCAmelCase_ = ("UpDecoderBlock2D",) ,UpperCAmelCase_ = (64,) ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = "silu" ,UpperCAmelCase_ = 4 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 0.18215 ,):
super().__init__()
# pass init params to Encoder
_lowercase : int = Encoder(
in_channels=__UpperCAmelCase ,out_channels=__UpperCAmelCase ,down_block_types=__UpperCAmelCase ,block_out_channels=__UpperCAmelCase ,layers_per_block=__UpperCAmelCase ,act_fn=__UpperCAmelCase ,norm_num_groups=__UpperCAmelCase ,double_z=__UpperCAmelCase ,)
# pass init params to Decoder
_lowercase : Any = Decoder(
in_channels=__UpperCAmelCase ,out_channels=__UpperCAmelCase ,up_block_types=__UpperCAmelCase ,block_out_channels=__UpperCAmelCase ,layers_per_block=__UpperCAmelCase ,norm_num_groups=__UpperCAmelCase ,act_fn=__UpperCAmelCase ,)
_lowercase : List[Any] = nn.Convad(2 * latent_channels ,2 * latent_channels ,1 )
_lowercase : Optional[Any] = nn.Convad(__UpperCAmelCase ,__UpperCAmelCase ,1 )
_lowercase : Union[str, Any] = False
_lowercase : int = False
# only relevant if vae tiling is enabled
_lowercase : Dict = self.config.sample_size
_lowercase : Any = (
self.config.sample_size[0]
if isinstance(self.config.sample_size ,(list, tuple) )
else self.config.sample_size
)
_lowercase : Optional[Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_lowercase : List[str] = 0.25
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=False ):
if isinstance(__UpperCAmelCase ,(Encoder, Decoder) ):
_lowercase : List[Any] = value
def lowerCamelCase__ ( self ,UpperCAmelCase_ = True ):
_lowercase : str = use_tiling
def lowerCamelCase__ ( self ):
self.enable_tiling(__UpperCAmelCase )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = True
def lowerCamelCase__ ( self ):
_lowercase : Dict = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = {}
def fn_recursive_add_processors(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if hasattr(__UpperCAmelCase ,"""set_processor""" ):
_lowercase : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" ,__UpperCAmelCase ,__UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
return processors
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : str = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if hasattr(__UpperCAmelCase ,"""set_processor""" ):
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" ,__UpperCAmelCase ,__UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def lowerCamelCase__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__UpperCAmelCase ,return_dict=__UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
_lowercase : List[str] = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
_lowercase : Union[str, Any] = torch.cat(__UpperCAmelCase )
else:
_lowercase : List[str] = self.encoder(__UpperCAmelCase )
_lowercase : int = self.quant_conv(__UpperCAmelCase )
_lowercase : List[str] = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__UpperCAmelCase ,return_dict=__UpperCAmelCase )
_lowercase : str = self.post_quant_conv(__UpperCAmelCase )
_lowercase : Tuple = self.decoder(__UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
@apply_forward_hook
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = True ):
if self.use_slicing and z.shape[0] > 1:
_lowercase : Union[str, Any] = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
_lowercase : List[Any] = torch.cat(__UpperCAmelCase )
else:
_lowercase : Union[str, Any] = self._decode(__UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = min(a.shape[2] ,b.shape[2] ,__UpperCAmelCase )
for y in range(__UpperCAmelCase ):
_lowercase : Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = min(a.shape[3] ,b.shape[3] ,__UpperCAmelCase )
for x in range(__UpperCAmelCase ):
_lowercase : Optional[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = True ):
_lowercase : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_lowercase : str = int(self.tile_latent_min_size * self.tile_overlap_factor )
_lowercase : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_lowercase : Dict = []
for i in range(0 ,x.shape[2] ,__UpperCAmelCase ):
_lowercase : Optional[int] = []
for j in range(0 ,x.shape[3] ,__UpperCAmelCase ):
_lowercase : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_lowercase : Tuple = self.encoder(__UpperCAmelCase )
_lowercase : List[Any] = self.quant_conv(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
_lowercase : Tuple = []
for i, row in enumerate(__UpperCAmelCase ):
_lowercase : int = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowercase : Any = self.blend_v(rows[i - 1][j] ,__UpperCAmelCase ,__UpperCAmelCase )
if j > 0:
_lowercase : List[Any] = self.blend_h(row[j - 1] ,__UpperCAmelCase ,__UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase ,dim=3 ) )
_lowercase : Tuple = torch.cat(__UpperCAmelCase ,dim=2 )
_lowercase : str = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = True ):
_lowercase : Any = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_lowercase : Union[str, Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
_lowercase : Optional[int] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_lowercase : List[str] = []
for i in range(0 ,z.shape[2] ,__UpperCAmelCase ):
_lowercase : Optional[Any] = []
for j in range(0 ,z.shape[3] ,__UpperCAmelCase ):
_lowercase : Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_lowercase : str = self.post_quant_conv(__UpperCAmelCase )
_lowercase : Tuple = self.decoder(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
_lowercase : str = []
for i, row in enumerate(__UpperCAmelCase ):
_lowercase : Optional[int] = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowercase : Any = self.blend_v(rows[i - 1][j] ,__UpperCAmelCase ,__UpperCAmelCase )
if j > 0:
_lowercase : int = self.blend_h(row[j - 1] ,__UpperCAmelCase ,__UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase ,dim=3 ) )
_lowercase : List[str] = torch.cat(__UpperCAmelCase ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,):
_lowercase : Any = sample
_lowercase : Optional[int] = self.encode(__UpperCAmelCase ).latent_dist
if sample_posterior:
_lowercase : Union[str, Any] = posterior.sample(generator=__UpperCAmelCase )
else:
_lowercase : int = posterior.mode()
_lowercase : int = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
| 354 |
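The `blend_v`/`blend_h` helpers in the autoencoder above remove seams between decoded tiles by linearly cross-fading the overlapping border rows/columns. A minimal NumPy sketch of the horizontal variant (standalone, with illustrative names, not the class method itself):

import numpy as np

def blend_h(a, b, blend_extent):
    # Cross-fade the last `blend_extent` columns of tile `a` into the first
    # columns of tile `b`; the weight ramps linearly from a to b.
    blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
    for x in range(blend_extent):
        w = x / blend_extent
        b[..., x] = a[..., -blend_extent + x] * (1 - w) + b[..., x] * w
    return b

left = np.zeros((1, 3, 4, 8))  # left tile, all zeros
right = np.ones((1, 3, 4, 8))  # right tile, all ones
print(blend_h(left, right, 4)[0, 0, 0, :4])  # [0.   0.25 0.5  0.75]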
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 0 |
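The file above implements the Burrows-Wheeler transform: sort all rotations of the input, keep the last column plus the index of the original string, and invert by repeatedly prepending that column and re-sorting. A compact standalone sketch of the same algorithm (names are illustrative):

def bwt(s):
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rotation[-1] for rotation in rotations), rotations.index(s)

def inverse_bwt(last_column, idx):
    # Rebuild the sorted rotation table one column at a time.
    table = [""] * len(last_column)
    for _ in range(len(last_column)):
        table = sorted(last_column[i] + table[i] for i in range(len(last_column)))
    return table[idx]

encoded, idx = bwt("banana")
print(encoded, idx)  # nnbaaa 3
assert inverse_bwt(encoded, idx) == "banana"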
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 'char'
SCREAMING_SNAKE_CASE_ : Dict = 'bpe'
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'wp'
UpperCAmelCase: Dict = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ['image_processor', 'char_tokenizer']
SCREAMING_SNAKE_CASE_ : int = 'ViTImageProcessor'
SCREAMING_SNAKE_CASE_ : Dict = 'MgpstrTokenizer'
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
_lowercase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,SCREAMING_SNAKE_CASE_ ,)
_lowercase : List[Any] = kwargs.pop("""feature_extractor""" )
_lowercase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
_lowercase : Optional[int] = tokenizer
_lowercase : Optional[int] = AutoTokenizer.from_pretrained("""gpt2""" )
_lowercase : Tuple = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __call__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if text is not None:
_lowercase : Dict = self.char_tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowercase : int = encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = sequences
_lowercase : Optional[int] = char_preds.size(0 )
_lowercase : List[str] = self._decode_helper(SCREAMING_SNAKE_CASE_ ,"""char""" )
_lowercase : Union[str, Any] = self._decode_helper(SCREAMING_SNAKE_CASE_ ,"""bpe""" )
_lowercase : Any = self._decode_helper(SCREAMING_SNAKE_CASE_ ,"""wp""" )
_lowercase : Optional[int] = []
_lowercase : Union[str, Any] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowercase : int = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowercase : Union[str, Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowercase : List[Any] = scores.index(max(SCREAMING_SNAKE_CASE_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowercase : Any = {}
_lowercase : List[str] = final_strs
_lowercase : Optional[int] = final_scores
_lowercase : List[str] = char_strs
_lowercase : Optional[Any] = bpe_strs
_lowercase : Dict = wp_strs
return out
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if format == DecodeType.CHARACTER:
_lowercase : Any = self.char_decode
_lowercase : Union[str, Any] = 1
_lowercase : Dict = """[s]"""
elif format == DecodeType.BPE:
_lowercase : str = self.bpe_decode
_lowercase : List[Any] = 2
_lowercase : Tuple = """#"""
elif format == DecodeType.WORDPIECE:
_lowercase : Union[str, Any] = self.wp_decode
_lowercase : Union[str, Any] = 1_02
_lowercase : int = """[SEP]"""
else:
raise ValueError(f"""Format {format} is not supported.""" )
_lowercase , _lowercase : Any = [], []
_lowercase : Dict = pred_logits.size(0 )
_lowercase : Optional[int] = pred_logits.size(1 )
_lowercase : List[str] = pred_logits.topk(1 ,dim=-1 ,largest=SCREAMING_SNAKE_CASE_ ,sorted=SCREAMING_SNAKE_CASE_ )
_lowercase : Optional[Any] = preds_index.view(-1 ,SCREAMING_SNAKE_CASE_ )[:, 1:]
_lowercase : Tuple = decoder(SCREAMING_SNAKE_CASE_ )
_lowercase : Union[str, Any] = torch.nn.functional.softmax(SCREAMING_SNAKE_CASE_ ,dim=2 ).max(dim=2 )
_lowercase : Optional[int] = preds_max_prob[:, 1:]
for index in range(SCREAMING_SNAKE_CASE_ ):
_lowercase : int = preds_str[index].find(SCREAMING_SNAKE_CASE_ )
_lowercase : Dict = preds_str[index][:pred_eos]
_lowercase : List[str] = preds_index[index].cpu().tolist()
_lowercase : List[str] = pred_index.index(SCREAMING_SNAKE_CASE_ ) if eos_token in pred_index else -1
_lowercase : Dict = preds_max_prob[index][: pred_eos_index + 1]
_lowercase : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(SCREAMING_SNAKE_CASE_ )
conf_scores.append(SCREAMING_SNAKE_CASE_ )
return dec_strs, conf_scores
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Tuple = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )]
return decode_strs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.bpe_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )]
return decode_strs
| 355 |
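The processor above decodes each image with three heads (character, BPE, WordPiece), scores every decoded string by the cumulative product of its per-token max probabilities, and keeps the head with the highest confidence per sample. A toy sketch of that selection rule with made-up scores:

# (decoded string, cumulative confidence) per head -- hypothetical values
char_out, bpe_out, wp_out = ("ticket", 0.91), ("ticket", 0.74), ("tlcket", 0.55)

scores = [char_out[1], bpe_out[1], wp_out[1]]
strings = [char_out[0], bpe_out[0], wp_out[0]]
best = strings[scores.index(max(scores))]
print(best)  # 'ticket' -- the character head wins for this sample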
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 0 |
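The second solver above is the classic two-pointer search: sort once, then for each anchor element walk a left/right pointer pair inward, which replaces the O(n^3) permutation scan with an O(n^2) pass. A standalone sketch:

def triplet_sum(arr, target):
    arr.sort()
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            total = arr[i] + arr[left] + arr[right]
            if total == target:
                return (arr[i], arr[left], arr[right])
            if total < target:
                left += 1   # sum too small: move the left pointer up
            else:
                right -= 1  # sum too large: move the right pointer down
    return (0, 0, 0)

print(triplet_sum([13, 29, 7, 23, 5], 35))  # (5, 7, 23)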
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
| 336 | 0 |
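The processor above fans a single prompt out to three components: the language model's tokenizer, a separate Q-Former tokenizer, and the image processor, merging everything into one encoding. A usage sketch; the checkpoint name is a published one but is illustrative here, and loading it requires network access:

from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.new("RGB", (224, 224))  # stand-in for a real image
inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
# One prompt, three encodings: LM tokens, Q-Former tokens, pixel values.
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values',
#  'qformer_attention_mask', 'qformer_input_ids']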
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return (data["data"], data["target"])
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = XGBClassifier()
classifier.fit(A__ , A__ )
return classifier
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = load_iris()
_lowercase , _lowercase : Optional[Any] = data_handling(A__ )
_lowercase , _lowercase , _lowercase , _lowercase : str = train_test_split(
A__ , A__ , test_size=0.2_5 )
_lowercase : Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
_lowercase : int = xgboost(A__ , A__ )
# Display the confusion matrix of the classifier on the test set
ConfusionMatrixDisplay.from_estimator(
A__ , A__ , A__ , display_labels=A__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: Optional[Any] = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class UpperCamelCase ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'van'
def __init__( self ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=3 ,UpperCAmelCase_=[7, 3, 3, 3] ,UpperCAmelCase_=[4, 2, 2, 2] ,UpperCAmelCase_=[64, 1_28, 3_20, 5_12] ,UpperCAmelCase_=[3, 3, 12, 3] ,UpperCAmelCase_=[8, 8, 4, 4] ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=1E-2 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,**UpperCAmelCase_ ,):
super().__init__(**__lowerCAmelCase )
_lowercase : List[Any] = image_size
_lowercase : Union[str, Any] = num_channels
_lowercase : Optional[int] = patch_sizes
_lowercase : int = strides
_lowercase : List[str] = hidden_sizes
_lowercase : List[str] = depths
_lowercase : int = mlp_ratios
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = initializer_range
_lowercase : str = layer_norm_eps
_lowercase : Tuple = layer_scale_init_value
_lowercase : List[Any] = drop_path_rate
_lowercase : int = dropout_rate
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 | 0 |
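The composite config above nests vision, Q-Former, and text sub-configs and re-expands each one during serialization so the whole object round-trips through a single dict. A usage sketch built on the classmethod defined at the end of the class, assuming the released transformers API where it is named `from_vision_qformer_text_configs`:

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

config = InstructBlipConfig.from_vision_qformer_text_configs(
    InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
)
d = config.to_dict()  # every sub-config is expanded to a plain dict
print(d["vision_config"]["model_type"], d["qformer_config"]["model_type"])
# instructblip_vision_model instructblip_qformer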
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase: Union[str, Any] = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE__ ) )
class UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
with TemporaryDirectory() as tmp_dir:
_lowercase : str = dataset_module_factory(a_ ,cache_dir=a_ )
_lowercase : Dict = import_main_class(dataset_module.module_path ,dataset=a_ )
_lowercase : Optional[Any] = builder_cls(
cache_dir=a_ ,config_name=a_ ,hash=dataset_module.hash ,)
_lowercase : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a_ ).replace(os.sep ,"""/""" ),
config.DATASET_INFO_FILENAME,
] )
_lowercase : int = cached_path(a_ ,cache_dir=a_ )
self.assertTrue(os.path.exists(a_ ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : List[str] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
_lowercase : int = dataset_module_factory("""wikipedia""" , cache_dir=_UpperCamelCase )
_lowercase : Dict = import_main_class(dataset_module.module_path )
_lowercase : Optional[int] = builder_cls(
cache_dir=_UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowercase : Union[str, Any] = None
builder_instance.download_and_prepare()
_lowercase : List[Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = dataset_module_factory("""wikipedia""" , cache_dir=_UpperCamelCase )
_lowercase : List[str] = import_main_class(dataset_module.module_path , dataset=_UpperCamelCase )
_lowercase : Union[str, Any] = builder_cls(
cache_dir=_UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
_lowercase : Any = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert "train" in ds
assert isinstance(ds["""train"""] , _UpperCamelCase )
assert next(iter(ds["""train"""] ) )
| 359 |
"""simple docstring"""
import cv2
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cv2.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cv2.cvtColor(UpperCAmelCase_ ,cv2.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Corner response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cv2.imwrite("""detect.png""", color_img)
| 336 | 0 |
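The detector above scores each pixel with the Harris response R = det(M) - k * trace(M)^2, where M is the structure tensor of the image gradients summed over a window: R is large and positive at corners, negative along edges, and near zero in flat regions. A NumPy-only check on a synthetic corner:

import numpy as np

img = np.zeros((20, 20))
img[10:, 10:] = 255.0  # bright quadrant whose corner sits at (10, 10)

dy, dx = np.gradient(img)
ixx, iyy, ixy = dx**2, dy**2, dx * dy

def response(y, x, offset=1, k=0.04):
    # Sum the structure tensor over a (2*offset+1)^2 window, then R = det - k*trace^2.
    window = np.s_[y - offset : y + offset + 1, x - offset : x + offset + 1]
    wxx, wyy, wxy = ixx[window].sum(), iyy[window].sum(), ixy[window].sum()
    return wxx * wyy - wxy**2 - k * (wxx + wyy) ** 2

print(response(10, 10) > 0 > response(15, 10))  # True: corner positive, edge negative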
"""simple docstring"""
UpperCAmelCase: Dict = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 360 |
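The table above pins every dependency to one canonical requirement string. Setup scripts typically consume such a table through a small lookup helper so each extras group stays consistent with the pins; a sketch (the helper name is illustrative):

deps = {
    "torch": "torch>=1.4",
    "transformers": "transformers>=4.25.1",
    "safetensors": "safetensors",
}

def deps_list(*pkgs):
    # Resolve bare package names against the pin table.
    return [deps[pkg] for pkg in pkgs]

extras = {"torch": deps_list("torch", "safetensors")}
print(extras)  # {'torch': ['torch>=1.4', 'safetensors']}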
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
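One behaviour the tests above pin down is LED-specific: `tokenizer.pad` extends a user-supplied `global_attention_mask` along with `input_ids`, filling padded positions with -1. A usage sketch based on that asserted behaviour (loading the checkpoint requires network access):

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["Summary of the text.", "Another summary."])  # no padding yet
# Give the first token of each sequence global attention.
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
padded = tokenizer.pad(enc, padding=True)
# The shorter sequence's mask now ends in -1 entries, mirroring the padding.
print(padded["global_attention_mask"])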
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
def decorator(__UpperCAmelCase ):
_lowercase : Optional[Any] = getattr(a__ , """handle_key""" , [] )
handle += [key]
setattr(a__ , """handle_key""" , a__ )
return func
return decorator
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase ):
def decorator(__UpperCAmelCase ):
_lowercase : int = getattr(a__ , """handle_key""" , [] )
handle += keys
setattr(a__ , """handle_key""" , a__ )
return func
return decorator
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __new__( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = super().__new__(cls ,_snake_case ,_snake_case ,_snake_case )
if not hasattr(_snake_case ,"""key_handler""" ):
setattr(_snake_case ,"""key_handler""" ,{} )
setattr(_snake_case ,"""handle_input""" ,KeyHandler.handle_input )
for value in attrs.values():
_lowercase : List[Any] = getattr(_snake_case ,"""handle_key""" ,[] )
for key in handled_keys:
_lowercase : Dict = value
return new_cls
@staticmethod
def lowerCamelCase__ ( cls ):
_lowercase : Optional[int] = get_character()
if char != KEYMAP["undefined"]:
_lowercase : Union[str, Any] = ord(_snake_case )
_lowercase : Dict = cls.key_handler.get(_snake_case )
if handler:
_lowercase : int = char
return handler(cls )
else:
return None
def __SCREAMING_SNAKE_CASE ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 361 |
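The module above registers key handlers declaratively: decorators stamp a `handle_key` list onto each function, and the metaclass sweeps those attributes into a per-class dispatch table. A standalone sketch of the same registration pattern using `__init_subclass__` instead of a metaclass (equivalent here and simpler to read; all names are illustrative):

def mark(key):
    # Stamp the handled key onto the function.
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Menu:
    key_handler = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.key_handler = {}
        for value in vars(cls).values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value  # collect tagged methods

class MainMenu(Menu):
    @mark("q")
    def quit(self):
        return "bye"

print(MainMenu.key_handler["q"](MainMenu()))  # 'bye'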
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ):
_lowercase : Union[str, Any] = [2, 1, 2, -1]
_lowercase : Tuple = [1, 2, 3, 4]
def lowerCamelCase__ ( self ):
_lowercase : Any = len(self.first_signal )
_lowercase : str = len(self.second_signal )
_lowercase : Tuple = max(UpperCAmelCase_ ,UpperCAmelCase_ )
# create a zero matrix of max_length x max_length
_lowercase : Tuple = [[0] * max_length for i in range(UpperCAmelCase_ )]
# pad the smaller signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCAmelCase_ ):
_lowercase : Tuple = deque(self.second_signal )
rotated_signal.rotate(UpperCAmelCase_ )
for j, item in enumerate(UpperCAmelCase_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
_lowercase : Optional[Any] = np.matmul(np.transpose(UpperCAmelCase_ ) ,np.transpose(self.first_signal ) )
# round off to two decimal places
return [round(UpperCAmelCase_ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 362 |
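The class above computes circular convolution by rotating the second signal into a matrix. The circular convolution theorem gives an independent cross-check: the same result is the inverse DFT of the product of the two signals' DFTs.

import numpy as np

first = np.array([2, 1, 2, -1], dtype=float)
second = np.array([1, 2, 3, 4], dtype=float)

via_fft = np.real(np.fft.ifft(np.fft.fft(first) * np.fft.fft(second)))
print(np.round(via_fft, 2))  # [10. 10.  6. 14.] -- matches the matrix method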
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# closed-form sum of an arithmetic series: n/2 * (2a + (n - 1)d)
return total
def __SCREAMING_SNAKE_CASE ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
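The formula above is the closed form S = n/2 * (2a + (n - 1)d) for an arithmetic series. A quick check against a direct summation:

def arithmetic_series_sum(first_term, common_diff, num_terms):
    return (num_terms / 2) * (2 * first_term + (num_terms - 1) * common_diff)

direct = sum(1 + i for i in range(10))  # 1 + 2 + ... + 10
print(arithmetic_series_sum(1, 1, 10), direct)  # 55.0 55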
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
import numpy as np
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
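The function above relies on the identity tanh(x) = 2 / (1 + e^(-2x)) - 1, obtained from tanh(x) = (e^x - e^-x) / (e^x + e^-x) by multiplying numerator and denominator by e^-x. A quick check against NumPy's built-in:

import numpy as np

v = np.array([-2.0, 0.0, 0.5, 2.0])
custom = (2 / (1 + np.exp(-2 * v))) - 1
assert np.allclose(custom, np.tanh(v))
print(custom)  # [-0.96402758  0.          0.46211716  0.96402758]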
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
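# Worked example of the helper above (illustrative numbers): resizing a
# 480x640 image toward 384x384 with keep_aspect_ratio=True and multiple=32
# gives scale_height = 384/480 = 0.8 and scale_width = 384/640 = 0.6; the
# scale closer to 1.0 wins, so both sides use 0.8, and rounding 0.8*480=384
# and 0.8*640=512 to multiples of 32 yields an output size of (384, 512).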
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
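# Minimal usage sketch (an assumption, mirroring how the class above is typically
# exposed; names outside this file are illustrative):
#     processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> (1, 3, new_height, new_width)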
| 336 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    """simple docstring"""

    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=""" """)
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
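# Expected output of the demo above (the swap exchanges node *data*, not links):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5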
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), 1 - µB(x))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, µA(x) - µB(x)]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
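# Sketch (not in the original script): a max-min composition of two fuzzy
# relations R1 (m x n) and R2 (n x p), T[i, j] = max_k min(R1[i, k], R2[k, j]),
# written with numpy broadcasting.
R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
R2 = np.array([[0.5, 0.9], [0.3, 0.7]])
T = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)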
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = tempfile.mkdtemp()
_lowercase : Optional[int] = 8
# DPR tok
_lowercase : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowercase : List[Any] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
_lowercase : Optional[int] = os.path.join(lowerCAmelCase__ ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_lowercase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : Optional[Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
_lowercase : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Optional[Any] = {"""unk_token""": """<unk>"""}
_lowercase : Optional[int] = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
_lowercase : Dict = os.path.join(lowerCAmelCase__ ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(lowerCAmelCase__ ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase__ ) )
def lowerCamelCase__ ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def lowerCamelCase__ ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def lowerCamelCase__ ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_dummy_dataset()
_lowercase : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,)
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_lowercase : Any = dataset
_lowercase : Optional[Any] = RagRetriever(
lowerCAmelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
return retriever
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.get_dummy_dataset()
_lowercase : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""custom""" ,)
if from_disk:
_lowercase : List[str] = os.path.join(self.tmpdirname ,"""dataset""" )
_lowercase : str = os.path.join(self.tmpdirname ,"""index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname ,"""index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname ,"""dataset""" ) )
del dataset
_lowercase : Optional[int] = RagRetriever(
lowerCAmelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
else:
_lowercase : List[Any] = RagRetriever(
lowerCAmelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,lowerCAmelCase__ ) ,)
return retriever
def lowerCamelCase__ ( self ):
_lowercase : int = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT )
_lowercase : List[Any] = os.path.join(self.tmpdirname ,"""hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" ,index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] ,open(index_file_name + """.index_meta.dpr""" ,"""wb""" ) )
_lowercase : Dict = os.path.join(self.tmpdirname ,"""psgs_w100.tsv.pkl""" )
_lowercase : List[Any] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(lowerCAmelCase__ ,open(lowerCAmelCase__ ,"""wb""" ) )
_lowercase : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""legacy""" ,index_path=self.tmpdirname ,)
_lowercase : str = RagRetriever(
lowerCAmelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = 1
_lowercase : str = self.get_dummy_canonical_hf_index_retriever()
_lowercase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase , _lowercase , _lowercase : List[str] = retriever.retrieve(lowerCAmelCase__ ,n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_lowercase : Any = self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase__ )
_lowercase : List[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : List[str] = retriever.retrieve(lowerCAmelCase__ ,n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self ):
_lowercase : Any = 1
_lowercase : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
_lowercase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase , _lowercase , _lowercase : Any = retriever.retrieve(lowerCAmelCase__ ,n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
_lowercase : Union[str, Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : Dict = retriever.retrieve(lowerCAmelCase__ ,n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self ):
_lowercase : int = 1
_lowercase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
_lowercase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase , _lowercase , _lowercase : int = retriever.retrieve(lowerCAmelCase__ ,n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
_lowercase : Tuple = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : Dict = retriever.retrieve(lowerCAmelCase__ ,n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self ):
_lowercase : Dict = 1
_lowercase : Tuple = self.get_dummy_legacy_index_retriever()
_lowercase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase , _lowercase , _lowercase : Optional[int] = retriever.retrieve(lowerCAmelCase__ ,n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) ,lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["""text"""][0] ,"""bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] ,"""foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
_lowercase : Optional[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : Union[str, Any] = retriever.retrieve(lowerCAmelCase__ ,n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase__ ( self ):
import torch
_lowercase : str = 1
_lowercase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
_lowercase : List[Any] = [[5, 7], [10, 11]]
_lowercase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : List[str] = retriever(lowerCAmelCase__ ,lowerCAmelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCAmelCase__ )
_lowercase , _lowercase , _lowercase : List[Any] = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ ,np.ndarray )
_lowercase : Tuple = retriever(
lowerCAmelCase__ ,lowerCAmelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCAmelCase__ ,return_tensors="""pt""" ,)
_lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase__ ( self ):
_lowercase : Any = self.get_dpr_ctx_encoder_tokenizer()
_lowercase : Union[str, Any] = 1
_lowercase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase__ )
_lowercase : Union[str, Any] = [[5, 7], [10, 11]]
_lowercase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_lowercase : Tuple = retriever(lowerCAmelCase__ ,lowerCAmelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCAmelCase__ )
self.assertEqual(
len(lowerCAmelCase__ ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) ,lowerCAmelCase__ ) # check for doc token related keys in dictionary.
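# Note (descriptive, mirrors the assertions above): the retriever call returns
# "context_input_ids", "context_attention_mask", "retrieved_doc_embeds" and
# "doc_ids"; once a context-encoder tokenizer is set, "tokenized_doc_ids" and
# "tokenized_doc_attention_mask" are added, giving the 6 keys checked in the last test.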
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
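# Usage sketch of what the tests above exercise (illustrative, not a new test):
#     processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     inputs = processor(text="lower newer", images=image)
#     sorted(inputs.keys())  # -> ["attention_mask", "input_ids", "pixel_values"]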
| 336 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "x" , __UpperCAmelCase = 10**-10 , __UpperCAmelCase = 1 , ):
_lowercase : Any = symbols(UpperCamelCase__ )
_lowercase : List[str] = lambdify(UpperCamelCase__ , UpperCamelCase__ )
_lowercase : Union[str, Any] = lambdify(UpperCamelCase__ , diff(UpperCamelCase__ , UpperCamelCase__ ) )
_lowercase : Union[str, Any] = starting_point
while True:
if diff_function(UpperCamelCase__ ) != 0:
_lowercase : Optional[Any] = prev_guess - multiplicity * func(UpperCamelCase__ ) / diff_function(
UpperCamelCase__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_lowercase : Optional[int] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
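# One more sketch (added for illustration): the classic Newton iteration for sqrt(2)
print(F'The root of x**2 - 2 = 0 is {newton_raphson("x**2 - 2", 1.5)}')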
| 367 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class UpperCamelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths)
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
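# Minimal end-to-end sketch (an assumption that this builder backs
# datasets.Dataset.from_spark, as in recent `datasets` releases):
#     df = spark.createDataFrame([("foo",), ("bar",)], "text: string")
#     ds = Dataset.from_spark(df)  # materializes the dataframe as Arrow shards via _prepare_split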
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[int] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bert"""] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_bert"""] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_bert"""] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
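# Sketch of the lazy-import idea used above (a hypothetical simplification of
# _LazyModule, not the real implementation): attribute access triggers the
# submodule import, so `import transformers` stays cheap until a class is used.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")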
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase ( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass
| 369 |
"""simple docstring"""
def reverse_words(input_str):
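    """
    Return the words of `input_str` in reverse order (exercised by the
    doctest runner below):

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """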
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
"""simple docstring"""
def solution(limit=1000000):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
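# The sieve above fills phi[n] with Euler's totient of n; summing phi(n) for
# 2 <= n <= limit counts the reduced proper fractions with denominator <= limit
# (Project Euler 72). Quick sanity check: solution(8) == 21.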
if __name__ == "__main__":
print(solution())
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines):
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
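# Example: comment-only lines are stripped before hashing, so these two
# calls produce the same digest:
#   _hash_python_lines(["# header", "x = 1"]) == _hash_python_lines(["x = 1"])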
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
def solution(limit=1000000):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, limit):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
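# Example: solution(10) searches the start values 2..9; the longest Collatz
# chain below 10 starts at 9 (20 terms, counting the start and the final 1),
# so the call returns 9.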
if __name__ == "__main__":
print(solution(int(input().strip())))
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
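# Example: pancake_sort([3, 1, 2]) returns [1, 2, 3]. Each pass flips the
# prefix that brings the current maximum to the front, then flips the whole
# unsorted prefix to sink that maximum into its final place.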
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 350 |
"""simple docstring"""
def generate_large_matrix():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid):
    # every row and every column must be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(list(col), reverse=True) for col in zip(*grid))
def find_negative_index(array):
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
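# Example: in a row sorted in decreasing order the binary search returns the
# index of the first negative value, e.g. find_negative_index([4, 3, 2, -1]) -> 3.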
def count_negatives_binary_search(grid):
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid):
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid):
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print("""Running benchmarks""")
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""", setup=setup, number=500)
        print(F"""{func}() took {time:0.4f} seconds""")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num):
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
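# Example: 1406357289 is one of the qualifying pandigitals (Project Euler 43),
# so is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) is True.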
def solution(n=10):
    return sum(
        int("""""".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(F'{solution() = }')
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    x = re.sub("""<n>""", """""", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 336 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
UpperCAmelCase: Tuple = {"""allegro/herbert-base-cased""": 514}
UpperCAmelCase: int = {}
class UpperCamelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
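# A minimal usage sketch (assumes the allegro/herbert-base-cased checkpoint is
# reachable on the Hugging Face Hub; token ids are illustrative):
#   tokenizer = UpperCamelCase.from_pretrained("allegro/herbert-base-cased")
#   tokenizer.build_inputs_with_special_tokens([101, 102])  # -> [cls, 101, 102, sep]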
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UpperCamelCase ( SchedulerMixin, ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", timestep_spacing="linspace", steps_offset=0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("""mps"""):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""")
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
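# A minimal denoising-loop sketch (hedged: `model` is a hypothetical noise
# predictor, not part of this file; the latent shape is illustrative):
#   scheduler = UpperCamelCase()
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample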
| 336 | 0 |
"""simple docstring"""
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
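# solution() steps week by week through 1901-2000 and counts the months that
# begin on a Sunday; the published Project Euler 19 answer is 171.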
if __name__ == "__main__":
print(solution())
| 353 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + """/today""").json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + """/random""").json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(model_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    tok = AutoTokenizer.from_pretrained(model_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="""train""", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["""input_ids"""].ne(pad).sum(1).tolist()
            tgt_lens = batch["""labels"""].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="""val""", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
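# fire exposes save_len_file as a CLI; a hypothetical invocation (the model
# name and data path are placeholders, not taken from this file):
#   python save_len_file.py --model_name sshleifer/tiny-mbart --data_dir ./wmt_en_ro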
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """simple docstring"""
    bwt_string: str
    idx_original_string: int
def all_rotations(s):
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s):
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string, idx_original_string):
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
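# Example round trip:
#   bwt_transform("banana") -> {"bwt_string": "nnbaaa", "idx_original_string": 3}
#   reverse_bwt("nnbaaa", 3) -> "banana"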
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 0 |