import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    Exponential Linear Unit (ELU) activation: returns x for x > 0 and
    alpha * (exp(x) - 1) otherwise.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
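
    # Minimal usage sketch (values illustrative): negative entries saturate
    # smoothly toward -alpha while positive entries pass through unchanged.
    print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))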
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Project Euler 191: count attendance strings over `days` days with no
    three consecutive late days and at most one absence in total."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
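
    # Sanity check against the worked example in Project Euler problem 191:
    # over a 4-day period exactly 43 of the 81 possible trinary strings
    # qualify for a prize.
    assert solution(4) == 43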
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
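
# A minimal sketch of the idea behind `_LazyModule` (not transformers' actual
# implementation): attribute access resolves to a real submodule import only on
# first use, so importing the package stays cheap.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_submodule = {
#               attr: submodule
#               for submodule, attrs in import_structure.items()
#               for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = self._attr_to_submodule[attr]
#           module = importlib.import_module(f".{submodule}", self.__name__)
#           return getattr(module, attr)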
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case ( lowerCAmelCase_ ) -> Any:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowerCAmelCase_ ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
_snake_case = PipelineDataFormat.from_str(
format=lowerCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowerCAmelCase_ , lowerCAmelCase_ )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , __lowerCamelCase : Pipeline , __lowerCamelCase : PipelineDataFormat ):
"""simple docstring"""
_snake_case = nlp
_snake_case = reader
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : ArgumentParser ):
"""simple docstring"""
_snake_case = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=__lowerCamelCase , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=__lowerCamelCase , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=__lowerCamelCase , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=__lowerCamelCase , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=__lowerCamelCase , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=__lowerCamelCase , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=__lowerCamelCase , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=__lowerCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case , _snake_case = self._nlp, []
for entry in self._reader:
_snake_case = nlp(**__lowerCamelCase ) if self._reader.is_multi_columns else nlp(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
outputs.append(__lowerCamelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case = self._reader.save_binary(__lowerCamelCase )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(__lowerCamelCase )
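
# Wiring sketch: roughly how `transformers-cli` exposes this command (kept as
# a comment because the module only runs inside the transformers package; the
# task and file names in the example invocation are illustrative).
#
#   parser = ArgumentParser("transformers-cli")
#   commands = parser.add_subparsers(help="transformers-cli command helpers")
#   RunCommand.register_subcommand(commands)
#   args = parser.parse_args()   # e.g. run --task sentiment-analysis
#                                #      --input reviews.csv --column text
#                                #      --format csv --output predictions.json
#   service = args.func(args)    # run_command_factory builds the RunCommand
#   service.run()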
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in at most one position,
    replacing the differing bit with '_'; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms to find all prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merged: both are covered, and the merged
                    # implicant moves on to the next round
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first pick implicants that are the only cover for some minterm
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
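
    # Non-interactive sanity sketch (runs after the interactive prompts above;
    # classic 3-variable example with minterms 0, 1, 2, 5, 6, 7 -- the exact
    # order of the returned implicants may vary):
    example_binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])
    example_prime = check(example_binary)
    example_chart = prime_implicant_chart(example_prime, example_binary)
    print(selection(example_chart, example_prime))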
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = 7.5 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
# get prompt text embeddings
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ = text_embeddings.shape
A__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
A__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ = 42
if negative_prompt is None:
A__ = [""]
elif type(SCREAMING_SNAKE_CASE__ ) is not type(SCREAMING_SNAKE_CASE__ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE__ )} !="""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE__ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
A__ = negative_prompt
A__ = text_input_ids.shape[-1]
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="pt" , )
A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ = uncond_embeddings.shape[1]
A__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
A__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(self.device )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(
self.device )
else:
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A__ = latents_reference.to(self.device )
A__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A__ = (latents_shape[3] - latents_shape_reference[3]) // 2
A__ = (latents_shape[2] - latents_shape_reference[2]) // 2
A__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A__ = 0 if dx < 0 else dx
A__ = 0 if dy < 0 else dy
A__ = max(-dx , 0 )
A__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# predict the noise residual
A__ = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = 1 / 0.1_8_2_1_5 * latents
A__ = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) , return_tensors="pt" ).to(
self.device )
A__ , A__ = self.safety_checker(
images=SCREAMING_SNAKE_CASE__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A__ = None
if output_type == "pil":
A__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE__ , nsfw_content_detected=SCREAMING_SNAKE_CASE__ )
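
# Usage sketch (kept as a comment: it downloads weights and needs a GPU; the
# model id is illustrative and the class name follows the definition above):
#
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5"
#   ).to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image = pipe(
#       "a photo of an astronaut riding a horse",
#       height=512,
#       width=768,
#       generator=generator,
#   ).images[0]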
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_subsequence(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n).

    >>> longest_subsequence([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor

logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
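
# Sketch: instantiating the shim emits the FutureWarning (run with the
# transformers package installed, e.g. `from transformers import CLIPFeatureExtractor`):
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       CLIPFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)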
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
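
# Sanity-check sketch (run with transformers installed):
#
#   from transformers import UniSpeechConfig
#   config = UniSpeechConfig()
#   assert config.num_feat_extract_layers == 7
#   assert config.inputs_to_logits_ratio == 320  # 5 * 2**6 from the conv strides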
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by appending eos_token_id to the sequence(s)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
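
# Usage sketch (downloads the real sentencepiece vocab, so it needs network
# access; run with transformers installed):
#
#   from transformers import PegasusTokenizer
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("PEGASUS is pre-trained with gap-sentence generation.")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))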
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Extra kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
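
# Invocation sketch (fire turns the function into a CLI; file names are
# illustrative, and extra flags are forwarded to calculate_rouge):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json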
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images` results
    into a folder named after the query. Returns the number of images saved."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # the URLs in the page are doubly escaped, so decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
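
# Invocation sketch (paths are illustrative):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin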
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Speed of sound in a fluid: c = sqrt(K / rho), where K is the fluid's
    bulk modulus (Pa) and rho its density (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
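
    # Worked example: water at room temperature has a density of roughly
    # 998 kg/m^3 and a bulk modulus of roughly 2.15 GPa, which gives
    # c = sqrt(2.15e9 / 998) ≈ 1.47e3 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))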
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
UpperCamelCase__ = get_logger(__name__)
UpperCamelCase__ = Path(__file__).parent / 'model_card_template.md'
UpperCamelCase__ = uuida().hex
UpperCamelCase__ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
UpperCamelCase__ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
UpperCamelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def lowerCamelCase ( _snake_case = None ):
UpperCAmelCase__ : int = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' ,'' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_snake_case ,_snake_case ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(_snake_case ,_snake_case ):
ua += "; " + user_agent
return ua
def lowerCamelCase ( _snake_case ,_snake_case = None ,_snake_case = None ):
if token is None:
UpperCAmelCase__ : int = HfFolder.get_token()
if organization is None:
UpperCAmelCase__ : Any = whoami(_snake_case )['name']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def lowerCamelCase ( _snake_case ,_snake_case ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(_snake_case ,'local_rank' ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase__ : List[Any] = args.hub_token if hasattr(_snake_case ,'hub_token' ) else None
UpperCAmelCase__ : Dict = get_full_repo_name(_snake_case ,token=_snake_case )
UpperCAmelCase__ : str = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' ,license='apache-2.0' ,library_name='diffusers' ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=_snake_case ,model_name=_snake_case ,repo_name=_snake_case ,dataset_name=args.dataset_name if hasattr(_snake_case ,'dataset_name' ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_snake_case ,'gradient_accumulation_steps' ) else None
) ,adam_betaa=args.adam_betaa if hasattr(_snake_case ,'adam_beta1' ) else None ,adam_betaa=args.adam_betaa if hasattr(_snake_case ,'adam_beta2' ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(_snake_case ,'adam_weight_decay' ) else None ,adam_epsilon=args.adam_epsilon if hasattr(_snake_case ,'adam_epsilon' ) else None ,lr_scheduler=args.lr_scheduler if hasattr(_snake_case ,'lr_scheduler' ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(_snake_case ,'lr_warmup_steps' ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(_snake_case ,'ema_inv_gamma' ) else None ,ema_power=args.ema_power if hasattr(_snake_case ,'ema_power' ) else None ,ema_max_decay=args.ema_max_decay if hasattr(_snake_case ,'ema_max_decay' ) else None ,mixed_precision=args.mixed_precision ,)
UpperCAmelCase__ : List[str] = os.path.join(args.output_dir ,'README.md' )
model_card.save(_snake_case )
def lowerCamelCase ( _snake_case ,_snake_case = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCAmelCase__ : Tuple = str(Path(_snake_case ).as_posix() )
UpperCAmelCase__ : str = re.search(r'snapshots/([^/]+)/' ,_snake_case )
if search is None:
return None
UpperCAmelCase__ : int = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_snake_case ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
UpperCamelCase__ = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
UpperCamelCase__ = os.path.join(hf_cache_home, 'diffusers')
def lowerCamelCase ( _snake_case = None ,_snake_case = None ):
if new_cache_dir is None:
UpperCAmelCase__ : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCAmelCase__ : List[str] = old_diffusers_cache
UpperCAmelCase__ : List[str] = Path(_snake_case ).expanduser()
UpperCAmelCase__ : Optional[Any] = Path(_snake_case ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCAmelCase__ : Tuple = new_cache_dir / old_blob_path.relative_to(_snake_case )
new_blob_path.parent.mkdir(parents=_snake_case ,exist_ok=_snake_case )
os.replace(_snake_case ,_snake_case )
try:
os.symlink(_snake_case ,_snake_case )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
UpperCamelCase__ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
UpperCamelCase__ = 0
else:
with open(cache_version_file) as f:
try:
UpperCamelCase__ = int(f.read())
except ValueError:
UpperCamelCase__ = 0
if cache_version < 1:
UpperCamelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
UpperCamelCase__ = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'the directory exists and can be written to.'
)
def _add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
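# Added illustration (filename is hypothetical): with variant "fp16",
# "diffusion_pytorch_model.bin" becomes "diffusion_pytorch_model.fp16.bin";
# the variant is spliced in just before the file extension.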
def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            # `__version__` is the diffusers package version; its import falls outside this excerpt
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 110 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0, 1, ..., N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
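    # Added sanity check; the values follow from the edges above:
    # 1 -> 3 costs 5 and 3 -> 4 costs 6, so the shortest 1 -> 4 distance is 11,
    # and 0 -> 2 (9) plus 2 -> 3 (7) gives 16 for 0 -> 3.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16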
| 17 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        # boolean flags below were obfuscated in the source; values follow the upstream AudioLDM test
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 171 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
    )
    # Find root of cos(x); start away from 0, where cos'(x) = -sin(x) vanishes
    # and the method would raise ZeroDivisionError
    print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 2)}""")
| 17 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 433 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 17 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
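    # Added quick self-check on a fixed list (runs after the interactive demo):
    assert patience_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]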
| 648 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# The original class name was obfuscated; the defaults below match the MobileNetV2
# image processor in transformers, so that name is assumed here.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
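# Added minimal usage sketch (assumes transformers with torch installed; the
# class name is the one assumed above):
#
#   import numpy as np
#   processor = MobileNetV2ImageProcessor()
#   dummy = np.zeros((300, 400, 3), dtype=np.uint8)
#   batch = processor.preprocess(dummy, return_tensors="pt")
#   # batch["pixel_values"].shape -> torch.Size([1, 3, 224, 224])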
| 17 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    # method names must start with `test_` for unittest to collect them;
    # the specific names below are assumed from the upstream accelerate tests
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 376 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    # The original doctest body was elided in this excerpt; kept as a no-op.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
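    # Added usage sketch (menu values are illustrative, not from the original):
    foods = build_menu(["burger", "salad", "soda"], [80, 50, 30], [40, 10, 10])
    taken, total_value = greedy(foods, 60.0, Things.get_value)
    print(taken, total_value)  # all three items fit under the 60-unit weight budget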
| 17 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTests(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmp_dict_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmp_dict_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmp_dict_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 92 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
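    # Added worked example: 1 kWh is 3.6e6 J by definition, so round-tripping
    # through the conversion table reproduces exactly that factor.
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0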
| 17 | 0 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
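    # Added quick self-check against the well-known first primes:
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]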
    print(prime_sieve_eratosthenes(user_num))
| 518 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # the Flax classes come from the Flax module, not the TF one
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
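    # Added note (assumes the standard `accelerate` CLI): the collective ops
    # above only exercise multiple ranks when launched with something like
    #   accelerate launch --num_processes 2 <this_script>.py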
    main()
| 25 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 17 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        # boolean flags below were obfuscated in the source; values follow the upstream DiffEdit test
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"""`{optional_component}` did not stay set to None after loading.""",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
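# Outside the test harness, the same three-stage DiffEdit flow looks roughly
# like the sketch below. It is illustrative only: prompts, strengths and the
# checkpoint mirror the slow tests above, and the method names are the ones
# those tests exercise.
#
#     import torch
#     from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
#     )
#     pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
#     pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
#     pipe.enable_model_cpu_offload()
#
#     image = load_image(
#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
#     ).resize((768, 768))
#     mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
#     latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
#     edited = pipe(
#         prompt="a bowl of pears", mask_image=mask, image_latents=latents,
#         negative_prompt="a bowl of fruit", inpaint_strength=0.7,
#     ).images[0]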
| 420 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
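# A hands-on sketch of the detection-annotation path the slow test above
# exercises. The single annotation below is made up for illustration; the
# keys follow the COCO detection format the processor consumes.
#
#     from PIL import Image
#     from transformers import DetaImageProcessor
#
#     processor = DetaImageProcessor()
#     image = Image.new("RGB", (640, 480))
#     target = {
#         "image_id": 1,
#         "annotations": [
#             {"image_id": 1, "category_id": 1, "bbox": [100.0, 100.0, 50.0, 50.0], "area": 2500.0, "iscrowd": 0}
#         ],
#     }
#     encoding = processor(images=image, annotations=target, return_tensors="pt")
#     print(encoding["pixel_values"].shape)   # resized + rescaled + normalized pixels
#     print(encoding["labels"][0]["boxes"])   # boxes converted to normalized (cx, cy, w, h)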
| 17 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 189 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
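# Sketch of the contract this launcher expects (not part of the utility):
# the target script must expose a module-level `_mp_fn(index)`, and is
# launched as e.g. `python xla_spawn.py --num_cores 8 train.py --lr 3e-5`.
# File names and flags here are illustrative.
#
#     # train.py
#     import torch_xla.core.xla_model as xm
#
#     def _mp_fn(index):
#         device = xm.xla_device()
#         xm.master_print(f"process {index} using {device}")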
| 17 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
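# Why `_pad` above special-cases `global_attention_mask`: LED carries one
# global-attention flag per token, so padding has to extend the mask with -1
# ("local attention") rather than zeros. A minimal sketch, assuming the
# checkpoint id from the pretrained maps above; the texts are illustrative:
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer(["short text", "a noticeably longer piece of text"])
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     batch = tokenizer.pad(enc, padding=True, return_tensors="pt")
#     # padded positions in batch["global_attention_mask"] are now -1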
| 662 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
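    # Worked check: Horner evaluates (((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0,
    # i.e. 79800.0 for x = 10, using n multiplications instead of the naive
    # sum's repeated exponentiation. Both forms agree up to float rounding:
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9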
| 17 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
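# Illustrative invocation (script name and paths are placeholders — adjust to
# wherever this file lives and to your checkpoint):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#         --original_config_file ./v1-inference.yaml \
#         --extract_ema --half \
#         --dump_path ./stable-diffusion-v1-5-diffusers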
| 510 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
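# Minimal usage sketch for a combined tokenizer/feature-extractor processor
# like the one above, assuming it is registered for a MusicGen checkpoint
# (the checkpoint id is illustrative):
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
#     # after generation, decode audio back while stripping padding:
#     # audio = processor.batch_decode(audio_values, padding_mask=inputs.get("padding_mask"))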
| 17 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
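# A non-interactive spot check (sketch) of the function above:
#
#     INF = float("inf")
#     g = [
#         [0.0, 2.0, INF],
#         [INF, 0.0, 3.0],
#         [INF, INF, 0.0],
#     ]
#     dist, _ = floyd_warshall(g, 3)
#     assert dist[0][2] == 5.0  # relaxed through vertex 1 (2.0 + 3.0)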
| 171 |
def get_highest_set_bit_position(number: int) -> int:
    # function name restored descriptively; the original identifier was lost
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
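    # Quick demonstration: the helper returns the 1-indexed position of the
    # highest set bit, i.e. the bit length of the input.
    assert get_highest_set_bit_position(1) == 1
    assert get_highest_set_bit_position(8) == 4  # 0b1000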
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (v must be sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    # function names here are restored descriptively; the originals were lost
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the end of an existing subsequence
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 433 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
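    # Spot check against the worked example in Project Euler 191: a 4-day
    # period admits exactly 43 prize strings.
    assert solution(4) == 43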
| 17 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 648 |
class FlowNetwork:
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class FlowNetworkAlgorithmExecutor:
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable using items from `index` onwards."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
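    # Tiny worked example: with capacity 10, taking the items weighing 4 and 6
    # (values 8 and 5) beats every other combination, so this prints 13.
    print(knapsack([5, 4, 6], [3, 8, 5], 3, 10, 0))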
| 376 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)  # integer minterms keep the binary conversion above well-formed
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
'''simple docstring'''
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 92 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (v must be sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    # function names here are restored descriptively; the originals were lost
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
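    # Worked example: the longest increasing subsequence of
    # [10, 22, 9, 33, 21, 50, 41, 60] has length 5 (e.g. 10, 22, 33, 50, 60).
    assert longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]) == 5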
| 17 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
def UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class _A ( _lowercase , _lowercase , unittest.TestCase ):
__a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__a = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
__a = False
__a = False
__a = False
__a = False
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFViTMAEModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCAmelCase ( self ):
pass
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def UpperCAmelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = self._prepare_for_class(__A , __A )
_UpperCAmelCase = model(__A , noise=__A )
_UpperCAmelCase = copy.deepcopy(self._prepare_for_class(__A , __A ) )
_UpperCAmelCase = model(**__A , noise=__A )
_UpperCAmelCase = outputs_dict[0].numpy()
_UpperCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def UpperCAmelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
_UpperCAmelCase = v.numpy()
else:
_UpperCAmelCase = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = self._prepare_for_class(__A , __A )
_UpperCAmelCase = prepare_numpy_arrays(__A )
_UpperCAmelCase = model(__A , noise=__A )
_UpperCAmelCase = model(**__A , noise=__A )
self.assert_outputs_same(__A , __A )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# make masks reproducible
np.random.seed(2 )
_UpperCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCAmelCase = tf_noise
super().check_pt_tf_models(__A , __A , __A )
def UpperCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(__A , __A ),)
if isinstance(__A , __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A , """_keras_serializable""" , __A )
}
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.convert_to_tensor(__A )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
_UpperCAmelCase = main_layer_class(__A )
_UpperCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_UpperCAmelCase = tf.keras.Model(__A , outputs=main_layer(__A ) )
_UpperCAmelCase = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__A , """keras_model.h5""" )
model.save(__A )
_UpperCAmelCase = tf.keras.models.load_model(
__A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A , tf.keras.Model )
_UpperCAmelCase = model(__A )
self.assert_outputs_same(__A , __A )
@slow
def UpperCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = self._prepare_for_class(__A , __A )
_UpperCAmelCase = model(__A , noise=__A )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = outputs.last_hidden_state.numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = outputs.logits.numpy()
_UpperCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
_UpperCAmelCase = model_class.from_pretrained(__A )
_UpperCAmelCase = model(__A , noise=__A )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = after_outputs["""last_hidden_state"""].numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = after_outputs["""logits"""].numpy()
_UpperCAmelCase = 0
_UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A , 1e-5 )
def UpperCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = self._prepare_for_class(__A , __A )
_UpperCAmelCase = model(__A , noise=__A )
_UpperCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
_UpperCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_UpperCAmelCase = model_class.from_config(model.config )
_UpperCAmelCase = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
_UpperCAmelCase = new_model(__A , noise=__A )
self.assert_outputs_same(__A , __A )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCAmelCase ( self ):
pass
@slow
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__A )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_UpperCAmelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__A , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCAmelCase = ViTMAEConfig()
_UpperCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_UpperCAmelCase = model(**__A , noise=__A )
# verify the logits
_UpperCAmelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __A )
_UpperCAmelCase = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __A , atol=1e-4 ) | 518 |
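A note on the pattern the tests above rely on: ViTMAE chooses masked patches by sorting per-patch noise, so seeding NumPy and passing an explicit `noise` tensor pins the mask down. A minimal sketch of that scheme (the helper name and shapes are illustrative, not the library API):

import numpy as np

def random_masking(num_patches: int, mask_ratio: float, noise: np.ndarray) -> np.ndarray:
    # Patches with the smallest noise values are kept; the rest are masked.
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise)       # ascending: low noise == keep
    mask = np.ones(num_patches)
    mask[ids_shuffle[:len_keep]] = 0      # 0 = keep, 1 = masked
    return mask

np.random.seed(2)                         # same trick the tests use
noise = np.random.uniform(size=(196,))
assert np.array_equal(random_masking(196, 0.75, noise), random_masking(196, 0.75, noise))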
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
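What the deprecation warning above asks for, in one line: import the pipeline from diffusers directly rather than through the shim.

from diffusers import StableDiffusionInpaintPipeline  # noqa: F401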
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ = get_tests_dir('fixtures/dummy-config.json')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 0
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__A , __A )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__A , __A )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE : Dict = os.path.join(__A , "fake-roberta" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
try:
AutoConfig.register("custom" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("model" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("bert" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
__A , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained("bert-base" )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(__A , revision="aaaaaa" )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
__A , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(__A ):
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
class _UpperCamelCase ( _lowercase ):
'''simple docstring'''
lowerCamelCase__ ='''new-model'''
try:
AutoConfig.register("new-model" , __A )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"] | 25 |
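The registration dance those tests exercise, as a standalone hedged sketch (the class and model-type name here are made up):

from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must match the registered key and not collide with built-ins

AutoConfig.register("my-model", MyConfig)
assert isinstance(AutoConfig.for_model("my-model"), MyConfig)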
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
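The closing property multiplies `conv_stride` together; with the default strides (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by 320x, i.e. one frame per 20 ms at 16 kHz:

import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320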
"""simple docstring"""
from math import sqrt
def solution ( limit : int = 1000000 ) -> int:
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
| 420 |
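The square root above comes from unfolding the cuboid: for a cuboid a x b x c with a <= b <= c, the shortest surface path between opposite corners is sqrt((a + b)**2 + c**2). A brute-force cross-check of the same count (Project Euler 86 quotes 1975 integer-path cuboids for M = 99):

from math import sqrt

def count_integer_paths(m: int) -> int:
    count = 0
    for c in range(1, m + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count

assert count_integer_paths(99) == 1975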
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path ,tgt_path ,save_path=None ,**calc_rouge_kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns ,tgt_lns ,**calc_rouge_kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
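Hedged usage sketch of the restored helper: both files hold one summary per line, extra kwargs flow through to `calculate_rouge`, and the paths are placeholders:

metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge_metrics.json")
print(metrics)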
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a ( __a ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCamelCase__ :int = []
UpperCamelCase__ :str = []
UpperCamelCase__ :List[Any] = []
for rt in rc.restypes:
UpperCamelCase__ :str = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCamelCase__ :int = {name: i for i, name in enumerate(a__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCamelCase__ :Tuple = torch.tensor(
a__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCamelCase__ :Dict = torch.tensor(
a__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCamelCase__ :Tuple = torch.tensor(
a__ , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCamelCase__ :Tuple = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCamelCase__ :Tuple = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase__ :Tuple = restype_atomaa_mask[protein_aatype]
UpperCamelCase__ :int = residx_atomaa_mask
UpperCamelCase__ :List[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCamelCase__ :List[str] = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase__ :Optional[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCamelCase__ :int = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCamelCase__ :List[Any] = rc.restype_atoa[restype_letter]
UpperCamelCase__ :Optional[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCamelCase__ :Optional[Any] = rc.atom_order[atom_name]
UpperCamelCase__ :Optional[Any] = 1
UpperCamelCase__ :int = restype_atomaa_mask[protein_aatype]
UpperCamelCase__ :Tuple = residx_atomaa_mask
return protein
def a ( __a ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCamelCase__ :int = tree_map(lambda __a : torch.tensor(a__ , device=batch['''aatype'''].device ) , a__ , np.ndarray )
UpperCamelCase__ :int = tensor_tree_map(lambda __a : np.array(a__ ) , make_atomaa_masks(a__ ) )
return out | 189 |
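The index tensors built above exist so the atom14 -> atom37 layout change becomes a single batched gather; a minimal sketch with hypothetical shapes:

import torch

num_res = 8
atom14_positions = torch.randn(num_res, 14, 3)
residx_atom37_to_atom14 = torch.zeros(num_res, 37, dtype=torch.long)  # per-aatype map as above
atom37_positions = torch.gather(
    atom14_positions, 1, residx_atom37_to_atom14[..., None].expand(-1, -1, 3)
)
assert atom37_positions.shape == (num_res, 37, 3)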
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,mobilebert_config_file ,pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
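The converter can also be driven from Python with the restored signature; all three paths below are placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/mobilebert/model.ckpt",
    mobilebert_config_file="/path/to/mobilebert_config.json",
    pytorch_dump_path="/path/to/pytorch_model.bin",
)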
import pytest
SCREAMING_SNAKE_CASE_:Optional[int] = '''__dummy_dataset1__'''
SCREAMING_SNAKE_CASE_:str = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Tuple = dataset_loading_script_name
A : Optional[Any] = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=a__ )
A : List[Any] = script_dir / f'''{script_name}.py'''
with open(a__ , """w""" ) as f:
f.write(a__ )
return str(a__ )
| 662 |
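A hedged sketch of how such a fixture chain is typically consumed; the fixtures above carry obfuscated names, so `dataset_loading_script_dir` is an assumed name for the last one:

from datasets import load_dataset

def test_dummy_dataset_loads(dataset_loading_script_dir):
    ds = load_dataset(dataset_loading_script_dir, split="train")  # resolves the generated script
    assert "tokens" in ds.column_names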
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
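What the `_LazyModule` indirection buys, sketched against the real package path: importing the package is cheap, and the torch-backed submodule is only resolved on first attribute access.

import importlib

informer = importlib.import_module("transformers.models.informer")
config_cls = informer.InformerConfig  # triggers the lazy import of the submodule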
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class _a ( _lowercase ):
a_ : Optional[Any] = '''funnel'''
a_ : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : List[str]=[4, 4, 4] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : List[Any]=64 , SCREAMING_SNAKE_CASE__ : Optional[Any]=30_72 , SCREAMING_SNAKE_CASE__ : str="gelu_new" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=1e-9 , SCREAMING_SNAKE_CASE__ : Optional[Any]="mean" , SCREAMING_SNAKE_CASE__ : Optional[int]="relative_shift" , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = block_sizes
lowerCamelCase__ = [1] * len(__A ) if block_repeats is None else block_repeats
assert len(__A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
lowerCamelCase__ = num_decoder_layers
lowerCamelCase__ = d_model
lowerCamelCase__ = n_head
lowerCamelCase__ = d_head
lowerCamelCase__ = d_inner
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = initializer_range
lowerCamelCase__ = initializer_std
lowerCamelCase__ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
lowerCamelCase__ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
lowerCamelCase__ = attention_type
lowerCamelCase__ = separate_cls
lowerCamelCase__ = truncate_seq
lowerCamelCase__ = pool_q_only
super().__init__(**__A )
@property
def _UpperCamelCase ( self : Dict ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def _UpperCamelCase ( self : Optional[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
| 510 |
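A quick check of the two derived properties defined above (the upstream class name is FunnelConfig; here the class carries an obfuscated name):

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
assert config.num_hidden_layers == 12  # sum(block_sizes)
assert config.num_blocks == 3          # len(block_sizes)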
import math
class Graph :
    def __init__( self , n = 0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
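Using the names restored above, one relaxation is easy to check by hand: with edges 1->3 (weight 5) and 3->4 (weight 6), the k = 3 pass settles dp[1][4] at 11.

g = Graph(5)
g.add_edge(1, 3, 5)
g.add_edge(3, 4, 6)
g.floyd_warshall()
assert g.show_min(1, 4) == 5 + 6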
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left : int ,right : int ,array : list[int] ,target : int ) -> int:
    for i in range(left ,right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array : list[int] ,target : int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left ,right ,array ,target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left : int ,right : int ,array : list[int] ,target : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left ,right ,array ,target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left ,one_third - 1 ,array ,target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 ,right ,array ,target )
        else:
            return rec_ternary_search(one_third + 1 ,two_third - 1 ,array ,target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(F'''Iterative search: {target} found at positions: {result_ite}''')
        print(F'''Recursive search: {target} found at positions: {result_rec}''')
    else:
        print('''Not found''')
| 171 |
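A tiny non-interactive check of both entry points (with the module-level precision of 10, lists this short fall straight through to the linear scan):

data = [1, 3, 5, 7, 11, 13, 17, 19, 23]
assert ite_ternary_search(data, 11) == 4
assert rec_ternary_search(0, len(data) - 1, data, 11) == 4
assert ite_ternary_search(data, 4) == -1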
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson ( function : str ,starting_point : complex ,variable : str = "x" ,precision : float = 10**-10 ,multiplicity : int = 1 ,) -> complex:
    x = symbols(variable )
    func = lambdify(x ,function )
    diff_function = lambdify(x ,diff(function ,x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
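One more worked example with the restored function: Newton's update x_next = x - f(x)/f'(x) applied to x**2 - 2 converges to sqrt(2):

root = newton_raphson("x**2 - 2", 1.5)
assert abs(root - 2**0.5) < 1e-9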
'''simple docstring'''
UpperCAmelCase = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def _snake_case ( from_type : str , to_type : str , value : float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION )}'
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod() | 433 |
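Worked conversions against the table above; the chained form converts through joules, so 1 kWh maps to 3.6e6 J and back exactly:

assert _snake_case("kilowatthour", "joule", 1) == 3_600_000.0
assert _snake_case("joule", "kilowatthour", 3_600_000) == 1.0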
from math import sqrt
def solution ( limit : int = 1000000 ) -> int:
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size ,sum_shortest_sides // 2 )
                    - max(1 ,sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase_: Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_: int = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowercase_: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 648 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCamelCase__ ( _A ):
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowerCamelCase__ ( _A ):
'''simple docstring'''
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , __lowercase : Dict ):
"""simple docstring"""
snake_case_ = metric_id
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = [MetricMock(_lowercase ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def snake_case__ ( self : str ):
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
'''simple docstring'''
if "tmp_path" in args:
snake_case_ = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(a__ , match="https://huggingface.co/docs/evaluate" ):
func(*a__ )
| 376 |
class Things :
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu ( name ,value ,weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu
def greedy ( item ,max_cost ,key_func ):
    items_copy = sorted(item ,key=key_func ,reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy ( ):
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
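Hedged usage sketch of the restored helpers, greedily maximizing value under a weight budget large enough to take everything:

names = ["Burger", "Pizza", "Coca Cola", "Rice"]
values = [80, 100, 60, 70]
weights = [40, 60, 40, 70]
menu = build_menu(names, values, weights)
taken, total_value = greedy(menu, 500.0, Things.get_value)
assert total_value == 310.0  # all four items fit under the 500-unit budget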
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs ( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 92 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( from_type : str ,to_type : str ,value : float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple ( max_perimeter ) -> typing.Counter[int]:
    triplets = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution ( max_perimeter = 1_0_0_0 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions') | 518 |
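Sanity checks for the restored counter: (3, 4, 5) is the only integer right triangle with perimeter at most 12, and p = 840 is the classic Project Euler 39 maximum below 1000:

from collections import Counter
assert pythagorean_triple(12) == Counter({12: 1})
assert solution(1_000) == 840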
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
import sys
def matrix_chain_order ( array ):
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution ( optimal_solution , i , j ):
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main ( ):
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main() | 25 |
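For the dimension list [30, 35, 15, 5, 10, 20, 25] used in main() (the CLRS textbook example), the optimal cost is a known 15125 scalar multiplications:

matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125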
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = [10, 20, 30, 40, 50, 60]
_UpperCamelCase = [2, 4, 6, 8, 10, 12]
_UpperCamelCase = 100
self.assertEqual(kp.calc_profit(__A , __A , __A ) , 210 )
def _UpperCamelCase ( self : str ) -> List[str]:
self.assertRaisesRegex(__A , '''max_weight must greater than zero.''' )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
self.assertRaisesRegex(__A , '''Weight can not be negative.''' )
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
self.assertRaisesRegex(__A , '''Profit can not be negative.''' )
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
self.assertRaisesRegex(__A , '''max_weight must greater than zero.''' )
def _UpperCamelCase ( self : Dict ) -> Union[str, Any]:
self.assertRaisesRegex(
__A , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 420 |
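# Editor's note: the tests above assume a greedy fractional-knapsack helper
# `calc_profit(profit, weight, max_weight)`. A minimal sketch of such a helper
# follows; the error messages mirror the assertions above, while the exception
# classes are illustrative guesses (the obfuscated tests hide the originals).
def calc_profit(profit: list[int], weight: list[int], max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Take items in decreasing profit/weight ratio, allowing a final fraction.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * (take / w)
        capacity -= take
        if capacity == 0:
            break
    return total


# All six items fit (total weight 42 <= 100), so the profit is the full 210.
assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210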
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
'''simple docstring'''
def a ( __a ) -> int:
'''simple docstring'''
stooge(a__ , 0 , len(a__ ) - 1 )
return arr
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :List[Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(a__ , a__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(a__ , i + t , (a__) )
# Recursively sort first 2/3 elements
stooge(a__ , a__ , (h - t) )
if __name__ == "__main__":
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted)) | 189 |
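# Editor's note: stooge sort is a teaching curiosity with roughly O(n^2.71)
# running time (T(n) = 3T(2n/3) + O(1)). A compact, runnable restatement of the
# routine above, illustrative only:
def stooge_sort_ref(arr: list[int], i: int = 0, h: int | None = None) -> list[int]:
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]  # put the smaller endpoint first
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_sort_ref(arr, i, h - t)  # first two thirds
        stooge_sort_ref(arr, i + t, h)  # last two thirds
        stooge_sort_ref(arr, i, h - t)  # first two thirds again
    return arr


assert stooge_sort_ref([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]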
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = 32, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = True, lowerCamelCase__ = [0.4814_5466, 0.457_8275, 0.4082_1073], lowerCamelCase__ = [0.2686_2954, 0.2613_0258, 0.2757_7711], lowerCamelCase__ = True, lowerCamelCase__=7, lowerCamelCase__=30, lowerCamelCase__=400, lowerCamelCase__=3, ):
A : str = parent
A : List[Any] = do_resize
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288}
A : Optional[Any] = size_divisor
A : str = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Tuple = do_center_crop
A : Any = image_mean
A : Union[str, Any] = image_std
A : str = do_pad
A : Any = batch_size
A : Union[str, Any] = num_channels
A : Dict = min_resolution
A : Tuple = max_resolution
def _lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False ):
if not batched:
A : Optional[Any] = self.size["""shortest_edge"""]
A : Optional[int] = image_inputs[0]
if isinstance(__A, Image.Image ):
A : Any = image.size
else:
A : Dict = image.shape[1], image.shape[2]
A : Tuple = size / min(__A, __A )
if h < w:
A : Union[str, Any] = size, scale * w
else:
A : str = scale * h, size
A : List[str] = int((1333 / 800) * size )
if max(__A, __A ) > max_size:
A : Optional[int] = max_size / max(__A, __A )
A : Tuple = newh * scale
A : Optional[Any] = neww * scale
A : Tuple = int(newh + 0.5 ), int(neww + 0.5 )
A : List[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A : Dict = []
for image in image_inputs:
A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : Dict = max(__A, key=lambda lowerCamelCase__ : item[0] )[0]
A : Dict = max(__A, key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = BridgeTowerImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self ):
A : str = BridgeTowerImageProcessingTester(self )
@property
def _lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A, """image_mean""" ) )
self.assertTrue(hasattr(__A, """image_std""" ) )
self.assertTrue(hasattr(__A, """do_normalize""" ) )
self.assertTrue(hasattr(__A, """do_resize""" ) )
self.assertTrue(hasattr(__A, """size""" ) )
self.assertTrue(hasattr(__A, """size_divisor""" ) )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image processor
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A, Image.Image )
# Test not batched input
A : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A : Optional[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : Union[str, Any] = image_processing(__A, return_tensors="""pt""" ).pixel_values
A : List[Any] = self.image_processor_tester.get_expected_values(__A, batched=__A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image processor
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A, np.ndarray )
# Test not batched input
A : Union[str, Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : List[str] = image_processing(__A, return_tensors="""pt""" ).pixel_values
A : Dict = self.image_processor_tester.get_expected_values(__A, batched=__A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image processor
A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A, torch.Tensor )
# Test not batched input
A : List[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : int = image_processing(__A, return_tensors="""pt""" ).pixel_values
A : Optional[Any] = self.image_processor_tester.get_expected_values(__A, batched=__A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
| 662 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
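# Editor's note: Horner's rule evaluates c0 + c1*x + ... + cn*x^n as
# (...((cn*x + c(n-1))*x + ...)*x + c0), using n multiplications instead of the
# naive method's O(n^2). A quick illustrative check (assuming the upstream names
# `evaluate_poly` and `horner` used in the demo above):
coeffs_demo = (0.0, 0.0, 5.0, 9.0, 7.0)  # 5x^2 + 9x^3 + 7x^4
# at x = 10: 5*100 + 9*1000 + 7*10000 = 79500
assert evaluate_poly(coeffs_demo, 10.0) == horner(coeffs_demo, 10.0) == 79500.0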
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def snake_case ( _a: Callable )-> Callable:
'''simple docstring'''
@wraps(a__ )
def _inner_fn(*_a: Union[str, Any] , **_a: Any ):
warnings.warn(
(F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , a__ , )
return fn(*a__ , **a__ )
return _inner_fn
| 510 |
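# Editor's note: illustrative usage of the decorator above (called `experimental`
# in the upstream library; the dump renamed it `snake_case`). Calling the wrapped
# function first emits the warning, then delegates to the original:
@snake_case
def beta_feature(value: int) -> int:
    return value * 2


beta_feature(3)  # warns "'beta_feature' is experimental ..." and returns 6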
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
def UpperCamelCase( __UpperCamelCase : int ):
if not isinstance(a__ ,a__ ):
raise TypeError('''Input value must be an \'int\' type''' )
lowerCAmelCase_ : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
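# Editor's note: both snippets above return the 1-based index of the highest set
# bit, which is exactly the number's bit length; Python exposes this directly:
assert (5).bit_length() == 3  # 0b101 -> position 3
assert (97).bit_length() == 7  # 0b1100001 -> position 7
assert (0).bit_length() == 0  # no bits set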
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __snake_case( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ = None , A_ = None ) -> Optional[Any]:
super().__init__()
lowerCAmelCase = pad_token_id
lowerCAmelCase = max_length
lowerCAmelCase = vocab
lowerCAmelCase = merges
lowerCAmelCase = BytePairTokenizer(__A , __A , sequence_length=__A )
@classmethod
def __snake_case ( cls , A_ , *A_ , **A_ ) -> Any:
lowerCAmelCase = [""" """.join(__A ) for m in tokenizer.bpe_ranks.keys()]
lowerCAmelCase = tokenizer.get_vocab()
return cls(__A , __A , *__A , **__A )
@classmethod
def __snake_case ( cls , A_ , *A_ , **A_ ) -> str:
lowerCAmelCase = GPTaTokenizer.from_pretrained(__A , *__A , **__A )
return cls.from_tokenizer(__A , *__A , **__A )
@classmethod
def __snake_case ( cls , A_ ) -> List[str]:
return cls(**__A )
def __snake_case ( self ) -> Tuple:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __snake_case ( self , A_ , A_ = None ) -> List[str]:
lowerCAmelCase = self.tf_tokenizer(__A )
lowerCAmelCase = tf.ones_like(__A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCAmelCase = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCAmelCase = pad_model_inputs(
__A , max_seq_length=__A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids} | 433 |
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {}
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__A : Dict = _calculate(days - 1 ,a__ ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__A : List[str] = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__A : int = _calculate(days - 1 ,a__ ,0 )
__A : Optional[int] = state_late + state_absent + state_ontime
__A : Tuple = prizestrings
return prizestrings
def __SCREAMING_SNAKE_CASE ( a__ : int = 30 ) -> int:
return _calculate(a__ ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 0 |
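# Editor's note: the same recursion expressed with functools.lru_cache instead of
# a hand-rolled dict cache; an equivalent, self-contained sketch.
from functools import lru_cache


@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)  # absent today
        + prize_strings(days - 1, absent, 0)  # on time today
    )


assert prize_strings(4) == 43  # the worked example from Project Euler 191
assert prize_strings(30) == 1918080160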
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
if not isinstance(a__ , a__):
snake_case__ : str = F'Input value of [number={number}] must be an integer'
raise TypeError(a__)
if number < 1:
snake_case__ : Optional[int] = F'Input value of [number={number}] must be > 0'
raise ValueError(a__)
snake_case__ : Optional[Any] = 1
for i in range(1 , a__):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648 |
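# Editor's note: the routine above computes Catalan numbers via the recurrence
# C(n) = C(n-1) * (4n - 2) / (n + 1), with 1-based input (1 -> C_0 = 1,
# 5 -> C_4 = 14). Cross-check against the closed form C_n = binom(2n, n) / (n + 1):
from math import comb


def catalan_closed_form(number: int) -> int:
    n = number - 1  # the snippet above is 1-indexed
    return comb(2 * n, n) // (n + 1)


assert [catalan_closed_form(k) for k in range(1, 7)] == [1, 1, 2, 5, 14, 42]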
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
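# Editor's note: a compact Edmonds-Karp (BFS augmenting paths) sketch that can
# cross-check the push-relabel result above; on the demo graph it also yields 6
# (the single path 0 -> 1 -> 2 -> 3 is limited by the capacity-6 middle edge).
from collections import deque


def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    n = len(capacity)
    residual = [row[:] for row in capacity]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return max_flow
        # find the bottleneck along the path, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        max_flow += bottleneck


assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6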
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case_ = cst_fwd.get(a__ , np.inf )
snake_case_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case_ = new_cost_f
snake_case_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = -1
snake_case_ = set()
snake_case_ = set()
snake_case_ = {source: 0}
snake_case_ = {destination: 0}
snake_case_ = {source: None}
snake_case_ = {destination: None}
snake_case_ = PriorityQueue()
snake_case_ = PriorityQueue()
snake_case_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case_ = queue_forward.get()
visited_forward.add(a__ )
snake_case_ = queue_backward.get()
visited_backward.add(a__ )
snake_case_ = pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
snake_case_ = pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case_ = shortest_distance
return shortest_path_distance
lowercase__ : Union[str, Any] = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
lowercase__ : Any = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 |
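# Editor's note: illustrative invocations, assuming the upstream names
# (`bidirectional_dij`, `graph_fwd`, `graph_bwd`) that the dump renamed above.
# B -> F follows B -> C -> D -> F (three unit edges), and E -> F is cheapest
# via E -> G -> F (2 + 1):
# bidirectional_dij("B", "F", graph_fwd, graph_bwd)  # -> 3
# bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3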
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )  # characters of the first string
__A : Optional[int] = list(a__ )  # characters of the second string
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != listb[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
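# Editor's note: a small worked example of the Quine-McCluskey flow above, using
# the upstream names (decimal_to_binary, check, prime_implicant_chart, selection)
# that the dump renamed. For 3 variables and minterms {1, 3, 7}:
#   decimal_to_binary(3, [1, 3, 7])  -> ['001', '011', '111']
#   check(...)                       -> prime implicants ['0_1', '_11']  (x'z and yz)
#   selection(chart, ...)            -> both are essential, since minterm 1 is
#                                       covered only by 0_1 and minterm 7 only by _11
# so the function minimizes to f = x'z + yz.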
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Any =True
lowercase : Any =True
lowercase : Dict =True
lowercase : int =99
lowercase : List[str] =32
lowercase : Optional[Any] =2
lowercase : Dict =4
lowercase : Dict =37
lowercase : List[str] ="""gelu"""
lowercase : str =0.1
lowercase : List[Any] =0.1
lowercase : Dict =512
lowercase : str =16
lowercase : Union[str, Any] =2
lowercase : Optional[int] =0.02
lowercase : Optional[int] =3
lowercase : int =4
lowercase : Optional[int] =None
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : int =None
if self.use_input_mask:
lowercase : str =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Dict =None
lowercase : int =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : str =ids_tensor([self.batch_size] , self.num_choices )
lowercase : str =EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
(
lowercase
) : int =self.prepare_config_and_inputs()
lowercase : str =True
lowercase : Tuple =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =TFEsmModel(config=__A )
lowercase : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase : Dict =model(__A )
lowercase : List[str] =[input_ids, input_mask]
lowercase : List[str] =model(__A )
lowercase : Tuple =model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : List[str] =True
lowercase : Optional[int] =TFEsmModel(config=__A )
lowercase : Dict ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowercase : Optional[int] =model(__A )
lowercase : List[Any] =[input_ids, input_mask]
lowercase : int =model(__A , encoder_hidden_states=__A )
# Also check the case where encoder outputs are not passed
lowercase : Tuple =model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFEsmForMaskedLM(config=__A )
lowercase : Optional[int] =model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Tuple =self.num_labels
lowercase : Any =TFEsmForTokenClassification(config=__A )
lowercase : Tuple ={"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase : List[Any] =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Tuple =self.prepare_config_and_inputs()
(
lowercase
) : str =config_and_inputs
lowercase : List[str] ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase_ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] =TFEsmModelTester(self )
lowercase : List[Any] =ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple =TFEsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str =model_class(__A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase : List[str] =model.get_bias()
assert isinstance(__A , __A )
for k, v in name.items():
assert isinstance(__A , tf.Variable )
else:
lowercase : Tuple =model.get_output_embeddings()
assert x is None
lowercase : int =model.get_bias()
assert name is None
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[int] =TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowercase : Union[str, Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : Optional[int] =model(__A )[0]
lowercase : Dict =[1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __A )
# compare the actual values for a slice.
lowercase : Union[str, Any] =tf.constant(
[
[
[8.92_15_18, -10.589814, -6.4_67_13_07],
[-6.3_96_71_56, -13.911377, -1.1_21_19_15],
[-7.78_12_47, -13.951557, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : str =TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowercase : Optional[Any] =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase : Tuple =model(__A )[0]
# compare the actual values for a slice.
lowercase : Union[str, Any] =tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 92 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
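# Editor's note: the routine above maintains tail[k] = the smallest possible tail
# of a strictly increasing subsequence of length k + 1, binary-searching each new
# value into place, for O(n log n) overall. Illustrative check, assuming the
# upstream name `longest_increasing_subsequence_length`:
# the LIS of [2, 5, 3, 7, 11, 8, 10, 13, 6] is 2, 3, 7, 8, 10, 13 -> length 6
# longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])  # -> 6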
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
a = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[Any]:
with open(a__ , """rb""" ) as f:
_UpperCAmelCase = Image.open(a__ )
return im.convert("""RGB""" )
@dataclass
class _A :
__a = field(
default=_lowercase , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
__a = field(
default=_lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__a = field(default=_lowercase , metadata={"""help""": """A folder containing the training data."""} )
__a = field(default=_lowercase , metadata={"""help""": """A folder containing the validation data."""} )
__a = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
__a = field(
default=_lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__a = field(
default=_lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase ( self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class _A :
__a = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__a = field(
default=_lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_lowercase )} , )
__a = field(
default=_lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a = field(
default=_lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
__a = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__a = field(default=_lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
__a = field(
default=_lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__a = field(
default=_lowercase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase = image_processor.size["""shortest_edge"""]
else:
_UpperCAmelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
_UpperCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase = Compose(
[
RandomResizedCrop(a__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase = Compose(
[
Resize(a__ ),
CenterCrop(a__ ),
ToTensor(),
normalize,
] )
def train_transforms(snake_case ):
_UpperCAmelCase = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(snake_case ):
_UpperCAmelCase = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
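
# A minimal invocation sketch; the dataset name, output directory and
# hyperparameters below are illustrative assumptions, not part of this script:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 3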
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
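
# A minimal usage sketch for the `Image` feature; the column and file names are
# illustrative ("cat.png" is assumed to exist locally):
#
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # decoded lazily into a PIL.Image.Image via decode_example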
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
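
# A quick sanity-check sketch; the numbers follow from the defaults above:
#
#   config = UniSpeechConfig()
#   config.num_feat_extract_layers  # 7, i.e. len(conv_dim)
#   config.inputs_to_logits_ratio   # 320 = 5 * 2 * 2 * 2 * 2 * 2 * 2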
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
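
# A minimal CLI sketch via `fire`; the script and file names are illustrative:
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json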
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
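
# A minimal invocation sketch; the script name and checkpoint paths are illustrative:
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin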
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
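
# A minimal composition sketch; the sub-configs below just use the defaults
# defined in this file:
#
#   vision_config = Blip2VisionConfig()
#   qformer_config = Blip2QFormerConfig()
#   text_config = CONFIG_MAPPING["opt"]()
#   config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
#   config.qformer_config.encoder_hidden_size  # 1408, tied to the vision hidden size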
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
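
# Usage sketch: with the lazy module installed in sys.modules, an import such as
#   from transformers.models.informer import InformerModel
# resolves through _LazyModule, so the torch-backed modeling code is only
# imported on first attribute access.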
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
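
# A minimal usage sketch; the UNet and scheduler choices are illustrative, and any
# diffusers model/scheduler pair with compatible shapes would do:
#
#   from diffusers import UNet2DModel, DDPMScheduler
#   pipe = CustomPipeline(unet=UNet2DModel(sample_size=32), scheduler=DDPMScheduler())
#   out = pipe()  # a tensor of ones with the UNet's sample shape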
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
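
# Floyd-Warshall runs in O(n^3) time and O(n^2) space. For the demo graph above,
# the printed distances can be checked by hand:
#
#   graph.show_min(1, 4)  # 11, via 1 -> 3 (5) and 3 -> 4 (6)
#   graph.show_min(0, 3)  # 16, via 0 -> 2 (9) and 2 -> 3 (7)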
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
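
# A quick sketch using the defaults above; `hidden_size` and `num_attention_heads`
# are aliases resolved through `attribute_map`:
#
#   config = BlenderbotSmallConfig()
#   config.hidden_size          # 512, aliased to d_model
#   config.num_attention_heads  # 16, aliased to encoder_attention_heads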
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
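
# A minimal export-side sketch; instantiating the ONNX config only needs a model
# config and a task (the task value here is illustrative):
#
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#   list(onnx_config.inputs)  # input_ids, attention_mask, decoder_input_ids, decoder_attention_mask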
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, variable))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
expected_words = lowerCAmelCase  # keep the word list under its own name before the placeholder is reused for the boxes below
lowerCAmelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
expected_boxes = lowerCAmelCase
self.assertListEqual(encoding.words , expected_words )
self.assertListEqual(encoding.boxes , expected_boxes )
# with apply_OCR = False
image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
from math import sqrt
def solution( limit : int = 1000000 ) -> int:
num_cuboids : int = 0
max_cuboid_size : int = 0
sum_shortest_sides : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
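# Added sanity check (an assumption based on the published Project Euler 86 examples,
# not part of the original file): M = 99 admits 1975 distinct cuboids and M = 100
# admits 2060, so the least M whose cumulative count exceeds 1975 should be 100.
assert solution(1975 ) == 100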
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig ):
"""simple docstring"""
model_type = '''mask2former'''
backbones_supported = ['''swin''']
attribute_map = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] , backbone_config : Optional[Dict] = None , feature_size : int = 2_5_6 , mask_feature_size : int = 2_5_6 , hidden_dim : int = 2_5_6 , encoder_feedforward_dim : int = 1_0_2_4 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 1_0 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 2_0_4_8 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 2_5_5 , num_queries : int = 1_0_0 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 1_2_5_4_4 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 1_6, 3_2] , output_auxiliary_logits : bool = None , **kwargs : Dict , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
backbone_config = CONFIG_MAPPING["""swin"""](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.pop("""model_type""" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
self.backbone_config = backbone_config
self.feature_size = feature_size
self.mask_feature_size = mask_feature_size
self.hidden_dim = hidden_dim
self.encoder_feedforward_dim = encoder_feedforward_dim
self.activation_function = activation_function
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.dim_feedforward = dim_feedforward
self.pre_norm = pre_norm
self.enforce_input_projection = enforce_input_projection
self.common_stride = common_stride
self.ignore_value = ignore_value
self.num_queries = num_queries
self.no_object_weight = no_object_weight
self.class_weight = class_weight
self.mask_weight = mask_weight
self.dice_weight = dice_weight
self.train_num_points = train_num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.use_auxiliary_loss = use_auxiliary_loss
self.feature_strides = feature_strides
self.output_auxiliary_logits = output_auxiliary_logits
self.num_hidden_layers = decoder_layers
super().__init__(**kwargs )
@classmethod
def from_backbone_config( cls , backbone_config : PretrainedConfig , **kwargs ):
return cls(
backbone_config=backbone_config , **kwargs , )
def to_dict( self ):
output = copy.deepcopy(self.__dict__ )
output["""backbone_config"""] = self.backbone_config.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
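# Usage sketch (illustrative addition, not part of the original module; it needs the
# installed package context because the imports above are relative):
# config = Mask2FormerConfig()  # falls back to the default Swin backbone
# assert config.to_dict()["""model_type"""] == """mask2former"""
# rebuilt = Mask2FormerConfig.from_backbone_config(config.backbone_config)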
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowerCamelCase_ ( BaseImageProcessor ):
model_input_names = ['''pixel_values''']
def __init__( self : Dict , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 256}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False )
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data , tensor_type=return_tensors )
def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits ) != len(target_sizes ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(target_sizes ):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits ) ):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
semantic_map = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(semantic_map )
else:
semantic_segmentation = logits.argmax(dim=1 )
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
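# Usage sketch (illustrative addition, not part of the original module; it needs the
# installed package context because the imports above are relative). With the defaults,
# an input image is resized so its short side is 256, center-cropped to 224x224,
# rescaled by 1/255 and normalized with the ImageNet statistics:
# processor = lowerCamelCase_()
# batch = processor(images=np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8), return_tensors="""pt""" )
# assert batch["""pixel_values"""].shape == (1, 3, 224, 224)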
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = AlbertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = AlbertForPreTraining(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = AlbertForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = AlbertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = AlbertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = AlbertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_choices = self.num_choices
model = AlbertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
"""simple docstring"""
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["""labels"""] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
inputs_dict["""sentence_order_label"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
"""simple docstring"""
self.model_tester = AlbertModelTester(self )
self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_pretraining( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def test_for_masked_lm( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_question_answering( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_model_various_embeddings( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = AlbertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_no_head_absolute_embedding( self ):
"""simple docstring"""
model = AlbertModel.from_pretrained("albert-base-v2" )
input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
class Things:
def __init__( self , name , value , weight ):
self.name = name
self.value = value
self.weight = weight
def __repr__( self ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def get_value( self ):
return self.value
def get_name( self ):
return self.name
def get_weight( self ):
return self.weight
def value_weight( self ):
return self.value / self.weight
def build_menu( name ,value ,weight ) -> list:
menu = []
for i in range(len(value ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def greedy( items ,max_cost ,key_func ) -> tuple:
items_copy = sorted(items ,key=key_func ,reverse=True )
result = []
total_value , total_cost = 0.0, 0.0
for i in range(len(items_copy ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def test_greedy() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
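# Illustrative run (added for clarity; the menu data below is invented for the example):
# sorting by value picks Pizza (value 100, weight 10) then Burger (80, 40), staying <= 60.
demo_menu = build_menu(["Burger", "Pizza", "Coca Cola"] , [80, 100, 60] , [40, 10, 20] )
demo_choice , demo_value = greedy(demo_menu , 60 , Things.get_value )
print(F"""Greedy pick within weight 60: {demo_choice} with total value {demo_value}""" )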
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = '''sshleifer/student_marian_en_ro_6_1'''
MBART_TINY = '''sshleifer/tiny-mbart'''
@require_torch
class TestTrainerExt( TestCasePlus ):
def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
'''simple docstring'''
output_dir = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
first_step_stats = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
last_step_stats = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , float )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def test_run_seqaseq_no_dist( self ):
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def test_run_seqaseq_dp( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def test_run_seqaseq_ddp( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=True )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def test_run_seqaseq_sharded_ddp( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def test_run_seqaseq_sharded_ddp_fp16( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def test_run_seqaseq_sharded_ddp_zero_dp_2( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def test_run_seqaseq_sharded_ddp_zero_dp_2_fp16( self ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False )
@require_apex
@require_torch_gpu
def test_run_seqaseq_apex( self ):
'''simple docstring'''
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def test_trainer_log_level_replica( self , experiment_id ):
'''simple docstring'''
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
data = experiments[experiment_id]
kwargs = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
log_info_string = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
n_matches = len(re.findall(log_info_string , cl.err ) )
self.assertEqual(n_matches , data['''n_matches'''] )
@slow
def test_run_seqaseq( self ):
'''simple docstring'''
output_dir = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
# Check metrics
logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
first_step_stats = eval_metrics[0]
last_step_stats = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , float )
# test if do_predict saves generations and metrics
contents = os.listdir(output_dir )
contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def test_run_seqaseq_bnb( self ):
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
extra_args = '''--skip_memory_metrics 0'''
output_dir = self.run_trainer(
max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
# Check metrics
logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
loss = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
loss_orig , loss_bnb , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def run_trainer( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3E-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , ):
'''simple docstring'''
data_dir = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
output_dir = self.get_auto_remove_tmp_dir()
args_train = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__A )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
args_eval = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__A )}
'''.split()
args_predict = """
--do_predict
""".split()
args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
n_gpus_to_use = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd , env=self.get_env() )
else:
testargs = ["""run_translation.py"""] + args
with patch.object(sys , '''argv''' , testargs ):
main()
return output_dir
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion( from_type : str ,to_type : str ,value : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
msg = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
)
raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
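# Quick sanity check (illustrative addition, not in the original file): 1000 J is 1 kJ,
# since the conversion is value * factor(from) / factor(to) = 1000 * 1.0 / 1000.
assert energy_conversion("joule" , "kilojoule" , 1000 ) == 1.0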
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
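# Shape walk-through for the stack above (added comment): a 64x64x3 input -> Conv2D(32, 3x3)
# gives 62x62x32 -> 2x2 max-pool gives 31x31x32 -> Conv2D(32, 3x3) gives 29x29x32 -> pool
# gives 14x14x32 -> flatten gives 6272 features -> Dense(128) -> Dense(1, sigmoid).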
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
test_set = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
prediction = '''Normal'''
if result[0][0] == 1:
prediction = '''Abnormality detected'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_wav2vec2'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_wav2vec2'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_wav2vec2'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .processing_wav2vec2 import Wav2Vec2Processor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2ForSequenceClassification,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''van'''
def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(**kwargs )
self.image_size = image_size
self.num_channels = num_channels
self.patch_sizes = patch_sizes
self.strides = strides
self.hidden_sizes = hidden_sizes
self.depths = depths
self.mlp_ratios = mlp_ratios
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.dropout_rate = dropout_rate
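# Usage sketch (illustrative addition, not part of the original module): the config is
# plain data, so it can be constructed and serialized without instantiating a model.
# config = VanConfig(hidden_sizes=[32, 64, 160, 256])
# assert config.to_dict()["model_type"] == "van"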
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp( TestCase ):
dataset = None
config_name = None
def test_dataset_info_available( self , dataset , config_name ):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
builder_cls = import_main_class(dataset_module.module_path , dataset=True )
builder_instance : DatasetBuilder = builder_cls(
cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
dataset_info_url = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ) -> None:
tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_dir )
builder_cls = import_main_class(dataset_module.module_path )
builder_instance : DatasetBuilder = builder_cls(
cache_dir=tmp_dir ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ) -> None:
dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_path )
builder_cls = import_main_class(dataset_module.module_path ,dataset=True )
builder_instance : DatasetBuilder = builder_cls(
cache_dir=tmp_path ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds ,IterableDatasetDict )
assert "train" in ds
assert isinstance(ds["""train"""] ,IterableDataset )
assert next(iter(ds["""train"""] ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
_import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
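# Note (added): _LazyModule defers the heavy submodule imports declared above; importing
# the package stays cheap, and accessing an attribute such as ViTModel is what actually
# triggers the underlying torch/TF/Flax import.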
| 420 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
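# A minimal worked example of the shortest-edge rule above (illustrative numbers,
# not from the test fixtures): for a 640x480 (w x h) image and shortest_edge=18,
# the height is the shorter side, so it becomes 18 while the width scales to
# int(18 * 640 / 480) = 24, preserving the aspect ratio.
assert int(18 * 640 / 480) == 24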
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext'''])) | 189 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
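# A hypothetical invocation sketch (the training script name and its arguments below
# are illustrative, not taken from the original file):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports run_glue.py as a module, rewrites sys.argv to append
# --tpu_num_cores, and hands mod._mp_fn to xmp.spawn once per TPU core.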
| 17 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 662 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
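# A minimal cross-check sketch (relies only on the two definitions above).
# Horner's rule evaluates c0 + c1*x + ... + cn*x**n with one multiply and one add
# per coefficient, instead of recomputing each power of x.
def _check_horner() -> None:
    coeffs = (3.0, 0.0, 2.0)  # 3 + 2*x**2
    for point in (-1.5, 0.0, 2.0):
        assert abs(horner(coeffs, point) - evaluate_poly(coeffs, point)) < 1e-9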
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _a(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
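# A minimal usage sketch for the post-processing above (the logits shape and label
# count are hypothetical; SimpleNamespace stands in for a real model output object):
from types import SimpleNamespace


def _post_process_demo(processor) -> None:
    logits = torch.randn(1, 150, 128, 128)  # (batch, num_labels, height, width)
    outputs = SimpleNamespace(logits=logits)
    maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
    assert maps[0].shape == (512, 512)  # one per-pixel label map per input image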
| 510 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
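# A minimal numeric sketch of the padding-mask trimming above (toy arrays; the
# padding_value of 0 is an assumption made only for this illustration):
def _trim_demo() -> None:
    padding_value = 0
    audio = np.arange(6, dtype=np.float32).reshape(1, 1, 6)  # (bsz, channels, seq_len)
    mask = np.array([[1, 1, 1, 1]])  # shorter than seq_len by 2
    # pad the mask with the non-padding token so the extra samples are kept
    mask = np.pad(mask, ((0, 0), (0, 2)), "constant", constant_values=1 - padding_value)
    kept = np.asarray(audio[0])[mask[0][None, :] != padding_value]
    assert kept.shape == (6,)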
| 17 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
            new_key = ".".join(key[1:])
# rename the key
for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
return converted_torch_dict
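# A minimal worked example of the layer renaming above (the input string is
# illustrative, not taken from an actual checkpoint):
_DEMO_KEY = re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_0.attention.out")
assert _DEMO_KEY == "encoder.layer.0.attention.out"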
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 171 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
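# A minimal cross-check sketch: for non-negative integers the result matches
# Python's built-in int.bit_length (an equivalence noted here for illustration,
# not part of the original file).
def _check_bit_position() -> None:
    for n in (0, 1, 2, 5, 0b101101, 1024):
        assert get_highest_set_bit_position(n) == n.bit_length()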
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution()) | 433 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
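# An equivalent memoization sketch using the standard library: functools.lru_cache
# replaces the manual cache dict above with the same semantics (added for
# illustration, not part of the original file).
from functools import lru_cache


@lru_cache(maxsize=None)
def _calculate_cached(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _calculate_cached(days - 1, absent, late + 1)
        + _calculate_cached(days - 1, absent + 1, 0)
        + _calculate_cached(days - 1, absent, 0)
    )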
if __name__ == "__main__":
print(solution())
| 17 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
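# A minimal numeric sketch of the posterior-variance formula used in _get_variance
# above (plain numpy, with an assumed linear beta schedule; not part of the original
# file): beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t.
import numpy as np


def _posterior_variance_demo(num_train_timesteps: int = 1000) -> None:
    betas = np.linspace(0.0001, 0.02, num_train_timesteps)
    alphas_cumprod = np.cumprod(1.0 - betas)
    t = 500
    variance = (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]
    assert 0 < variance < betas[t]  # the posterior variance never exceeds beta_t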
| 648 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive exponentiation by squaring, reduced modulo modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # iterate the tetration: result = base ** result (mod 10**digits) at each step
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
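# A minimal cross-check sketch against Python's built-in three-argument
# pow(base, exponent, modulus), which computes the same modular power
# (added for illustration, not part of the original file):
def _check_modexpt() -> None:
    for base, exp, mod in [(2, 10, 1000), (1777, 1855, 10**8), (3, 1, 7)]:
        assert _modexpt(base, exp, mod) == pow(base, exp, mod)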
if __name__ == "__main__":
print(f'''{solution() = }''')
| 376 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = """A red cat sitting on a park bench"""

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type='''np''', )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1E-2
| 92 |
from __future__ import annotations
def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail (binary search above)
            tail[ceil_index(v, -1, length - 1, v[i])] = v[i]

    return length
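# A minimal alternative sketch using the standard-library bisect module (the same
# O(n log n) tails idea for strictly increasing subsequences; added for
# illustration, not part of the original file):
from bisect import bisect_left


def lis_length_bisect(v: list[int]) -> int:
    tails: list[int] = []
    for value in v:
        i = bisect_left(tails, value)
        if i == len(tails):
            tails.append(value)
        else:
            tails[i] = value
    return len(tails)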
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

| 518 |
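A usage sketch of the converter above when driven directly from Python instead of the CLI. The output directory is a made-up placeholder, and note that the call downloads the full mmsegmentation checkpoint before verifying the logits:

convert_upernet_checkpoint(
    model_name="upernet-convnext-tiny",                   # smallest variant, fastest to verify
    pytorch_dump_folder_path="./upernet-convnext-tiny",   # hypothetical local output dir
    push_to_hub=False,                                    # keep everything local
)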
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # accept the historically misspelled kwarg used by early checkpoints
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")

| 25 |
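An illustrative check of the validation hook above (the values are arbitrary): a two-field dict with a known scaling type passes, anything else raises `ValueError`.

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})  # unknown type
except ValueError as err:
    print(err)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], ...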
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
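
A quick illustration of the property above: with the default strides (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples by their product, i.e. one logit frame per 320 input samples.

config = UniSpeechConfig()  # defaults as defined above
assert config.inputs_to_logits_ratio == 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320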
| 17 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 420 |
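Why the exact-zero assertions above hold in float32: sigmoid(-100) is computed as 1 / (1 + exp(100)), and exp(100) overflows float32 to inf, so SiLU(-100) = -100 * 0.0 evaluates to exactly 0.0. A standalone check, independent of the test class:

import torch

x = torch.tensor([-100.0, -1.0, 0.0, 20.0])
print(x * torch.sigmoid(x))  # tensor([ 0.0000, -0.2689,  0.0000, 20.0000]), up to print precision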
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
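Since the module is driven by fire.Fire, positional CLI arguments map straight onto the function parameters; the direct-call equivalent (file names are placeholders) would be:

# shell equivalent: python <this_script>.py preds.txt refs.txt --save_path rouge.json
metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")
print(metrics)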
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

| 189 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 662 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 510 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
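Note that `show_min` returns the distance instead of printing it, so the two trailing calls in the demo above discard their results; surfacing them gives (from the edges added there):

print(graph.show_min(1, 4))  # 11: shortest path 1 -> 3 -> 4 (5 + 6)
print(graph.show_min(0, 3))  # 16: shortest path 0 -> 2 -> 3 (9 + 7)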
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 171 |
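The closed forms used above are sum_{k=1..n} k^2 = n(n+1)(2n+1)/6 and (sum_{k=1..n} k)^2 = (n(n+1)/2)^2; for n = 100 their difference is 25164150. A brute-force cross-check (illustrative):

n = 100
brute = sum(range(1, n + 1)) ** 2 - sum(k * k for k in range(1, n + 1))
assert brute == solution(n) == 25164150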
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the 'starting_point' onwards by Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

| 433 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 648 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits"""
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 17 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 |
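A round-trip sketch of the class above: two parties exchange public keys and, by the commutativity of modular exponentiation, derive the same shared secret.

alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

alice_public = alice.generate_public_key()
bob_public = bob.generate_public_key()

# g^(ab) mod p == g^(ba) mod p, so both SHA-256 digests match
assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)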
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
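A demonstration of the greedy heuristic above with a made-up menu: sorting by `value_weight` (value density) picks Cola (3.0) and then Burger (2.0) within a weight budget of 60, after which Pizza and Rice no longer fit.

food = ["Burger", "Pizza", "Cola", "Rice"]
value = [80, 100, 30, 40]
weight = [40, 60, 10, 30]
taken, total_value = greedy(build_menu(food, value, weight), 60, Things.value_weight)
print(taken, total_value)  # [Things(Cola, 30, 10), Things(Burger, 80, 40)] 110.0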
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, config, tf_checkpoint_path):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
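
# Example invocation (hypothetical paths, shown for illustration only):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin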
| 92 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
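
# Worked examples, derived directly from the conversion table above:
#
#     energy_conversion("kilowatthour", "joule", 1)   # 1 * 3_600_000 / 1.0 -> 3600000.0
#     energy_conversion("joule", "kilojoule", 4500)   # 4500 * 1.0 / 1_000  -> 4.5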
| 17 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 518 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
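
# Note on the lazy-module pattern above: importing this package stays cheap
# because symbols are only resolved on first attribute access, at which point
# the heavy backend loads. Illustrative only:
#
#     from transformers.models.wav2vec2 import Wav2Vec2Config  # no torch import yet
#     from transformers.models.wav2vec2 import Wav2Vec2Model   # triggers the torch import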
| 17 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values) | 25 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
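
# The user-facing equivalent of the streaming path above goes through the
# public API (sketch only, assuming network access to the hosted data):
#
#     from datasets import load_dataset
#     ds = load_dataset("wikipedia", "20220301.frr", streaming=True)
#     first_example = next(iter(ds["train"]))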
| 17 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
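
# Sketch of the locking behaviour exercised above (hypothetical path): two
# FileLock objects on the same file exclude each other, and acquire(timeout)
# raises Timeout instead of blocking forever:
#
#     lock = FileLock("/tmp/demo.lock")
#     with lock.acquire(timeout=1):
#         pass  # critical section; a concurrent FileLock("/tmp/demo.lock") would time out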
| 420 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 17 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''<eod>''')
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('''xlnet-base-cased''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase__ :Dict = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__,
            model_name='''xlnet-base-cased''',
            revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''',
        ) | 189 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")

    # positional
    parser.add_argument(
        """training_script""",
        type=str,
        help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ),
    )

    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
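
# Example launch (hypothetical script path and arguments):
#
#   python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased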
| 17 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="""<unk>""", bos_token="""<unk>""", pad_token="""<unk>""")

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = """This is a test"""
        output_text = """This is a test"""
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = """<s>"""
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """j""")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        # fmt: off
        self.assertListEqual(
            tokens, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""], )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
A : Any = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A, model_name="""AI-Sweden/gpt-sw3-126m""", sequences=__A, )
| 662 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
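    # Editorial sanity check: the two evaluators must agree; Horner's rule computes
    # the same value with n multiply-adds instead of re-powering x for every term.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6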
| 17 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def test_gradient_accumulator_distribution_strategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
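        # Editorial note on the distributed check above: each replica keeps its own
        # gradient slot, so after the three accumulate(...) calls replica 0 holds
        # [2.0, 3.0] and replica 1 holds [1.0, -2.0]; apply_grad() then applies the
        # combined gradient once, and reset() zeroes both local slots.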
| 510 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bs, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bs):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
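
# A minimal usage sketch (editorial addition, not part of the upstream module).
# The checkpoint name is illustrative of a MusicGen model that ships this
# processor layout; `generated_values` stands in for model output of shape
# (batch, channels, seq_len).
#
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track with heavy drums"], padding=True, return_tensors="pt")
#   audio = processor.batch_decode(audio=generated_values, padding_mask=inputs.get("padding_mask"))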
| 17 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __snake_case ( _lowercase ):
_a = '''EncodecFeatureExtractor'''
_a = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , A_ : Any , A_ : Tuple):
super().__init__(__A , __A)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : str=None , A_ : Tuple=None , A_ : Dict=True):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A)
def __call__( self : Optional[Any] , *A_ : Tuple , **A_ : Tuple):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A)
lowerCAmelCase_ : str = kwargs.pop('''audio''' , __A)
lowerCAmelCase_ : Optional[Any] = kwargs.pop('''sampling_rate''' , __A)
lowerCAmelCase_ : int = kwargs.pop('''text''' , __A)
if len(__A) > 0:
lowerCAmelCase_ : int = args[0]
lowerCAmelCase_ : Dict = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if text is not None:
lowerCAmelCase_ : Dict = self.tokenizer(__A , **__A)
if audio is not None:
lowerCAmelCase_ : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase_ : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase_ : int = audio_inputs["""padding_mask"""]
return inputs
def UpperCAmelCase__ ( self : List[str] , *A_ : int , **A_ : Tuple):
lowerCAmelCase_ : Optional[int] = kwargs.pop('''audio''' , __A)
lowerCAmelCase_ : List[str] = kwargs.pop('''padding_mask''' , __A)
if len(__A) > 0:
lowerCAmelCase_ : Dict = args[0]
lowerCAmelCase_ : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A)
else:
return self.tokenizer.batch_decode(*__A , **__A)
def UpperCAmelCase__ ( self : Optional[Any] , *A_ : Dict , **A_ : Any):
return self.tokenizer.decode(*__A , **__A)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bs, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bs):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 171 |
def get_position_of_highest_set_bit(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
    import doctest

    doctest.testmod()
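    # Hand-checkable examples (editorial addition): 8 == 0b1000, so its highest set
    # bit sits at 1-indexed position 4; zero has no set bits.
    assert get_position_of_highest_set_bit(8) == 4
    assert get_position_of_highest_set_bit(0) == 0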
| 17 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 433 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
    print(solution())
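    # Editorial check: the Project Euler 191 statement gives 43 prize strings for a
    # 4-day period, which this memoised recursion reproduces.
    assert solution(4) == 43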
| 17 | 0 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
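    # Editorial checks: Floyd-Warshall is O(n^3); afterwards dp[u][v] holds all-pairs
    # shortest distances, e.g. 1 -> 3 via the direct edge (5) and 0 -> 2 -> 3 (9 + 7).
    assert graph.show_min(1, 3) == 5
    assert graph.show_min(0, 3) == 16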
| 648 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
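    # Editorial check: with a single entrance 0 and exit 3 the only augmenting path
    # is 0 -> 1 -> 2 -> 3, so push-relabel must return the bottleneck min(7, 6, 8) = 6.
    assert maximum_flow == 6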
| 17 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
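    # Example invocation (editorial addition; flags match the argparse setup in main(),
    # and "this_script.py" stands in for whatever this file is saved as):
    #   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .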
| 376 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # returns the merged term if the strings differ in at most one position
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # repeatedly merge terms until only prime implicants remain
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
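    # Non-interactive sketch (editorial addition) using a classic 3-variable example;
    # minterms are floats to match decimal_to_binary's signature above:
    #   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0, 6.0, 7.0])
    #   prime_implicants = check(binary)
    #   chart = prime_implicant_chart(prime_implicants, binary)
    #   print(selection(chart, prime_implicants))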
| 17 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
UpperCamelCase_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 92 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
    import doctest

    doctest.testmod()
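    # Hand-checkable example (editorial addition): the classic sequence below has a
    # longest strictly increasing subsequence of length 5, e.g. 10, 22, 33, 50, 60.
    assert longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]) == 5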
| 17 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root of the given function (a string) via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
    print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 518 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
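
# Editorial note: the supported import path referenced by the warning above is
#
#     from diffusers import StableDiffusionInpaintPipeline
#
# this module only re-exports that class for backward compatibility.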
| 17 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCamelCase__ =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("SegFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(__A )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
    def test_attention_outputs(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.attentions
SCREAMING_SNAKE_CASE : List[str] = sum(self.model_tester.depths )
self.assertEqual(len(__A ) , __A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE : str = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
SCREAMING_SNAKE_CASE : Tuple = (self.model_tester.image_size // 32) ** 2
SCREAMING_SNAKE_CASE : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = len(__A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE : Tuple = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output(self):
"""simple docstring"""
def check_hidden_states_output(a : Optional[int] , a : int , a : Dict ):
SCREAMING_SNAKE_CASE : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : Dict = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ) , __A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(__A , __A , __A )
    def test_training(self):
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
SCREAMING_SNAKE_CASE : List[str] = model_class(__A )
model.to(__A )
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__A , __A , return_labels=__A )
SCREAMING_SNAKE_CASE : Optional[int] = model(**__A ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_image_segmentation_ade(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
__A )
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=__A , return_tensors="pt" )
SCREAMING_SNAKE_CASE : Any = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(__A )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
@slow
    def test_inference_image_segmentation_city(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
SCREAMING_SNAKE_CASE : Tuple = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(__A )
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(images=__A , return_tensors="pt" )
SCREAMING_SNAKE_CASE : List[Any] = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(__A )
SCREAMING_SNAKE_CASE : int = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1e-1 ) )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
SCREAMING_SNAKE_CASE : List[Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
__A )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=__A , return_tensors="pt" )
SCREAMING_SNAKE_CASE : int = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(__A )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __A )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__A )
SCREAMING_SNAKE_CASE : Dict = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , __A )
| 25 |
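For orientation, the inference pattern these tests exercise looks like this when written out plainly. The checkpoint name matches the one used in the tests; the image path and target size are illustrative, so treat this as a sketch rather than part of the test suite.

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # logits come out at 1/4 of the input resolution
# post-processing upsamples and argmaxes the logits into a (height, width) label map
segmentation = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])
print(segmentation[0].shape)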
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( PretrainedConfig ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
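The final property of the config above multiplies the feature-extractor strides together, giving the model's overall audio downsampling factor. A stand-alone check with the default strides (a sketch, independent of the class):

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default conv_stride from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 raw samples per encoder frame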
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 420 |
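The metric class above is a thin wrapper around sklearn.metrics.recall_score; the snippet below reproduces its first docstring example directly against sklearn, bypassing the datasets wrapper.

from sklearn.metrics import recall_score

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
# two of the three true positives are recovered -> recall = 2/3
print(recall_score(references, predictions))  # 0.6666666666666666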
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **saver_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **saver_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
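For reference, fire turns the function above into a command-line tool. The module name rouge_cli and the file names below are hypothetical placeholders, not taken from the source:

# Shell usage (hypothetical file names):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json
# Equivalent direct call, assuming the module above is importable as rouge_cli:
from rouge_cli import calculate_rouge_path  # hypothetical module name

metrics = calculate_rouge_path("predictions.txt", "references.txt", save_path="rouge.json")
print(metrics)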
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        '''simple docstring'''
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 189 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
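A direct call to the converter, equivalent to driving the CLI above; all three paths are illustrative placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./mobilebert/mobilebert_variables.ckpt",  # placeholder TF checkpoint
    mobilebert_config_file="./mobilebert/config.json",            # placeholder config
    pytorch_dump_path="./mobilebert/pytorch_model.bin",           # placeholder output path
)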
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
return {"area": area}
elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
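A quick numeric round trip through the rearranged Casimir formula above; the plate area and separation are illustrative values, not from the source:

result = casimir_force(force=0, area=4e-4, distance=1e-6)
print(result)  # attractive force in newtons for 4 cm^2 plates 1 micrometre apart
recovered = casimir_force(force=result["force"], area=4e-4, distance=0)
print(recovered)  # solves the same equation backwards, recovering ~1e-6 m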
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
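With the _LazyModule wiring above, importing the package stays cheap and the torch-backed classes are only resolved on first attribute access. A minimal sketch of the effect:

from transformers import InformerConfig  # resolved lazily through _LazyModule

config = InformerConfig()
print(config.model_type)  # "informer"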
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
'''simple docstring'''
lowerCamelCase__ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase__ = """resnet101"""
if "dc5" in model_name:
lowerCamelCase__ = True
lowerCamelCase__ = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase__ = 250
else:
lowerCamelCase__ = 91
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """coco-detection-id2label.json"""
lowerCamelCase__ = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ = {int(a__ ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase__ = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase__ = ConditionalDetrImageProcessor(format=a__ )
# prepare image
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a__ , return_tensors='pt' )
lowerCamelCase__ = encoding["""pixel_values"""]
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
lowerCamelCase__ = torch.hub.load('DeppMeng/ConditionalDETR' , a__ , pretrained=a__ ).eval()
lowerCamelCase__ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase__ = """conditional_detr.""" + src
rename_key(a__ , a__ , a__ )
lowerCamelCase__ = rename_backbone_keys(a__ )
# query, key and value matrices need special treatment
read_in_q_k_v(a__ , is_panoptic=a__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase__ = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
lowerCamelCase__ = state_dict.pop(a__ )
lowerCamelCase__ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase__ = state_dict.pop(a__ )
lowerCamelCase__ = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
lowerCamelCase__ = state_dict.pop(a__ )
lowerCamelCase__ = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
lowerCamelCase__ = state_dict.pop(a__ )
lowerCamelCase__ = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ = ConditionalDetrForSegmentation(a__ ) if is_panoptic else ConditionalDetrForObjectDetection(a__ )
model.load_state_dict(a__ )
model.eval()
model.push_to_hub(repo_id=a__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
lowerCamelCase__ = conditional_detr(a__ )
lowerCamelCase__ = model(a__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 510 |
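A toy illustration of the split performed by read_in_q_k_v above: PyTorch's fused in_proj matrix stacks the query, key and value projections, and the conversion slices them apart in 256-row blocks (256 being the model's hidden size):

import torch

in_proj_weight = torch.randn(768, 256)  # 3 * hidden_size rows, hidden_size columns
q = in_proj_weight[:256, :]
k = in_proj_weight[256:512, :]
v = in_proj_weight[-256:, :]
assert q.shape == k.shape == v.shape == (256, 256)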
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
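A small sanity check for the O(n^3) relaxation above; the edge weights are illustrative:

g = Graph(3)
g.add_edge(0, 1, 4)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 10)
g.floyd_warshall()
print(g.show_min(0, 2))  # 5: routing through node 1 beats the direct edge of weight 10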
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' ,type=int ,default=1 ,help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' ,type=str ,help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) ,)
    # rest from the training program
    parser.add_argument('''training_script_args''' ,nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 171 |
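For context, the launcher above is driven from the shell; the script name and extra arguments below are placeholders:

    python xla_spawn.py --num_cores 8 train_script.py --learning_rate 3e-5

It appends the training script's directory to sys.path, rewrites sys.argv to pass --tpu_num_cores through, and hands the script's _mp_fn entry point to xmp.spawn once per TPU core.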
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    x = symbols(variable )
    func = lambdify(x ,function )
    diff_function = lambdify(x ,diff(function ,x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = """efficientnet.""" + item[1]
    key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
    key_mapping["""predictions/bias:0"""] = """classifier.bias"""
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True , weights="""imagenet""" , input_tensor=None , input_shape=None , pooling=None , classes=1_000 , classifier_activation="""softmax""" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("""Converting parameters...""" )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="""pt""" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["""image_size"""]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print("""Model outputs match!""" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'Pushing converted {model_name} to the hub...' )
        repo_name = F'efficientnet-{model_name}'
        preprocessor.push_to_hub(repo_name )
        hf_model.push_to_hub(repo_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 433 |
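The CONFIG_MAP above encodes EfficientNet's compound scaling: width, depth and input resolution grow together from b0 to b7. A quick look-up, assuming the module above is imported:

for name in ("b0", "b4", "b7"):
    cfg = CONFIG_MAP[name]
    print(name, cfg["width_coef"], cfg["depth_coef"], cfg["image_size"])
# b0 1.0 1.0 224 / b4 1.4 1.8 380 / b7 2.0 3.1 600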
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size ,sum_shortest_sides // 2 )
                    - max(1 ,sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
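The counting trick above comes from Project Euler 86: a cuboid is counted when the shortest corner-to-corner path over its surface has integer length. The classic example, self-contained:

from math import sqrt

# Unfolding a 6 x 5 x 3 cuboid makes the shortest surface path the hypotenuse
# of a right triangle with legs (5 + 3) and 6.
a, b, c = 3, 5, 6
print(sqrt((a + b) ** 2 + c ** 2))  # 10.0 -> integer, so this cuboid is counted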