code                     string   (lengths 87 to 55.2k)
code_codestyle           int64    (0 to 349)
style_context            string   (lengths 135 to 49.1k)
style_context_codestyle  int64    (0 to 349)
label                    int64    (0 to 1)
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
339
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
339
1
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
339
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
339
1
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
339
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
339
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
339
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
        and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
        Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
        Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
        Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
        and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
339
1
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
339
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
339
1
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
339
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled key is popped for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
339
1
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
339
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
339
1
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
339
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
339
1
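The string-based `reduce` above works but is roundabout. A minimal cross-check sketch, assuming the reconstructed `N` and `solution` names above, that multiplies each window of digits directly with `math.prod`:

```python
from math import prod


def solution_prod(n: str = N, width: int = 13) -> int:
    # Multiply each window of `width` adjacent digits and take the maximum.
    return max(prod(int(d) for d in n[i : i + width]) for i in range(len(n) - width + 1))


# For width 13 the two approaches should agree:
# assert solution_prod(N) == solution(N)
```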
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowerCAmelCase ( A ): def __init__( self : Union[str, Any] , A : CLIPSegForImageSegmentation , A : CLIPSegProcessor , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ) -> int: """simple docstring""" super().__init__() if hasattr(scheduler.config , 'steps_offset') and scheduler.config.steps_offset != 1: _UpperCAmelCase = ( F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " 'to update the config accordingly as leaving `steps_offset` might led to incorrect results' ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1' , '1.0.0' , A , standard_warn=A) _UpperCAmelCase = dict(scheduler.config) _UpperCAmelCase = 1 _UpperCAmelCase = FrozenDict(A) if hasattr(scheduler.config , 'skip_prk_steps') and scheduler.config.skip_prk_steps is False: _UpperCAmelCase = ( F"The configuration file of this scheduler: {scheduler} has not set the configuration" ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set' , '1.0.0' , A , standard_warn=A) _UpperCAmelCase = dict(scheduler.config) _UpperCAmelCase = True _UpperCAmelCase = FrozenDict(A) if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( segmentation_model=A , segmentation_processor=A , vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowerCamelCase ( self : Tuple , A : Optional[Union[str, int]] = "auto") -> str: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A) def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" self.enable_attention_slicing(A) def _lowerCamelCase ( self : int) -> Union[str, Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`') _UpperCAmelCase = torch.device('cuda') for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(A , A) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCamelCase ( self : Any) -> Dict: """simple docstring""" if self.device != torch.device('meta') or not hasattr(self.unet , '_hf_hook'): return self.device for module in self.unet.modules(): if ( hasattr(A , '_hf_hook') and hasattr(module._hf_hook , 'execution_device') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() def __call__( self : Dict , A : Union[str, List[str]] , A : Union[torch.FloatTensor, PIL.Image.Image] , A : str , A : int = 5_12 , A : int = 5_12 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , **A : Tuple , ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.segmentation_processor( text=[text] , images=[image] , padding='max_length' , return_tensors='pt').to(self.device) _UpperCAmelCase = self.segmentation_model(**A) _UpperCAmelCase = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy() _UpperCAmelCase = self.numpy_to_pil(A)[0].resize(image.size) # Run inpainting pipeline with the generated mask _UpperCAmelCase = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=A , image=A , mask_image=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , )
339
from __future__ import annotations from collections.abc import Callable UpperCAmelCase__ = list[list[float | int]] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = matrix[row][col] _UpperCAmelCase = vector[row][0] _UpperCAmelCase = 0 _UpperCAmelCase = 0 while row < size and col < size: # pivoting _UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _UpperCAmelCase = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _UpperCAmelCase = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = (x_val + 1) ** (size - col - 1) _UpperCAmelCase = y_val _UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int: '''simple docstring''' _UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase = 0 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for poly in polynomials: _UpperCAmelCase = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
339
1
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
339
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    # True when the list contains no duplicate values.
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
1
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of CLIP embeddings and
    scales/unscales embeddings with them."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
339
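One property worth noting: `scale` followed by `unscale` is the identity, since `((x - mean) / std) * std + mean == x`. A hypothetical round-trip check, assuming diffusers is installed and the class above is importable:

```python
import torch

# Round-trip check for the normalizer above (scale then unscale is the identity).
normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
roundtrip = normalizer.unscale(normalizer.scale(embeds))
assert torch.allclose(roundtrip, embeds, atol=1e-5)
```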
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
339
1
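The savings computed by `solution` come from re-encoding each parsed value in minimal form. A small round-trip sketch on a deliberately verbose numeral, assuming the two functions above:

```python
# "XIIIIII" is a valid but wasteful encoding of 16; the minimal form is "XVI",
# so this line alone would contribute 7 - 3 = 4 saved characters.
value = parse_roman_numerals("XIIIIII")
assert value == 16
assert generate_roman_numerals(value) == "XVI"
```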
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model"} UpperCAmelCase__ = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } UpperCAmelCase__ = { "camembert-base": 512, } UpperCAmelCase__ = "▁" class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] , A : List[str] , A : Tuple="<s>" , A : str="</s>" , A : Dict="</s>" , A : int="<s>" , A : str="<unk>" , A : Optional[int]="<pad>" , A : List[Any]="<mask>" , A : Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , A : Optional[Dict[str, Any]] = None , **A : List[str] , ) -> None: """simple docstring""" _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(A)) _UpperCAmelCase = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> _UpperCAmelCase = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} _UpperCAmelCase = len(self.fairseq_tokens_to_ids) _UpperCAmelCase = len(self.sp_model) + len(self.fairseq_tokens_to_ids) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def _lowerCamelCase ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Any , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A) if token_ids_a is None: return [1] + ([0] * len(A)) + [1] return [1] + ([0] * len(A)) + [1, 1] + ([0] * len(A)) + [1] def _lowerCamelCase ( self : Any , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def _lowerCamelCase ( self : int) -> int: """simple docstring""" return len(self.fairseq_tokens_to_ids) + len(self.sp_model) def _lowerCamelCase ( self : str) -> Dict: """simple docstring""" _UpperCAmelCase = {self.convert_ids_to_tokens(A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _lowerCamelCase ( self : int , A : str) -> List[str]: """simple docstring""" return self.sp_model.encode(A , out_type=A) def _lowerCamelCase ( 
self : Tuple , A : int) -> Any: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A) def _lowerCamelCase ( self : Any , A : Optional[int]) -> Optional[Any]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _lowerCamelCase ( self : List[Any] , A : str) -> Tuple: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = '' _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(A) _UpperCAmelCase = False out_string += self.sp_model.decode(A) return out_string.strip() def __getstate__( self : str) -> Dict: """simple docstring""" _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self : Dict , A : Any) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowerCamelCase ( self : Optional[Any] , A : str , A : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _UpperCAmelCase = os.path.join( A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(A) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , A) elif not os.path.isfile(self.vocab_file): with open(A , 'wb') as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(A) return (out_vocab_file,)
339
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation link text scraped from a Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
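The `_LazyModule` indirection above defers the heavy torch imports until an attribute is actually accessed. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` (illustrative only, not the actual `_LazyModule` implementation; the package layout is hypothetical):

```python
# lazy_pkg/__init__.py -- hypothetical package layout for illustration.
import importlib

_LAZY_ATTRS = {
    "MgpstrConfig": ".configuration_mgp_str",
    "MgpstrProcessor": ".processing_mgp_str",
}


def __getattr__(name: str):
    # Import the submodule only on first attribute access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```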
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
1
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    # Find the index of the first negative number using binary search.
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
339
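Because every row is sorted in non-increasing order, the per-row scan in `count_negatives_binary_search` can also be expressed with the standard library's `bisect`. A minimal alternative sketch:

```python
from bisect import bisect_left


def count_negatives_bisect(grid: list[list[int]]) -> int:
    # Reversing a non-increasing row gives a non-decreasing one, where
    # bisect_left(..., 0) is exactly the number of strictly negative entries.
    return sum(bisect_left(row[::-1], 0) for row in grid)


# Should agree with the other implementations, e.g.:
assert count_negatives_bisect([[3, 2], [1, 0]]) == 0
assert count_negatives_bisect([[7, 7, 6], [-1, -2, -3]]) == 3
```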
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
339
1
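For small day counts the memoised recursion above can be cross-checked by brute force: enumerate every attendance string over {O, L, A} and keep those with fewer than two absences in total and no three consecutive lates. A minimal sketch:

```python
from itertools import product


def prize_strings_brute_force(days: int) -> int:
    # O = on time, L = late, A = absent
    return sum(
        1
        for attendance in ("".join(chars) for chars in product("OLA", repeat=days))
        if attendance.count("A") < 2 and "LLL" not in attendance
    )


# Small values, hand-checkable: 3 strings of length 1, 8 of length 2, 19 of length 3.
assert [prize_strings_brute_force(d) for d in (1, 2, 3)] == [3, 8, 19]
```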
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } UpperCAmelCase__ = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } UpperCAmelCase__ = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } UpperCAmelCase__ = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase__ = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase__ = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase = DPRContextEncoderTokenizer class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = 
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase = DPRQuestionEncoderTokenizer UpperCAmelCase__ = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) UpperCAmelCase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) UpperCAmelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(A ) class __lowerCAmelCase : def __call__( self : Dict , A : int , A : Optional[str] = None , A : Optional[str] = None , A : Union[bool, str] = False , A : Union[bool, str] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = None , **A : int , ) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , ) elif titles is None or texts is None: _UpperCAmelCase = titles if texts is None else texts return super().__call__( A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , ) _UpperCAmelCase = titles if not isinstance(A , A) else [titles] _UpperCAmelCase = texts if not isinstance(A , A) else [texts] _UpperCAmelCase = len(A) _UpperCAmelCase = questions if not isinstance(A , A) else [questions] * n_passages assert len(A) == len( A), F"There should be as many titles than texts but got {len(A)} titles and {len(A)} texts." 
_UpperCAmelCase = super().__call__(A , A , padding=A , truncation=A)['input_ids'] _UpperCAmelCase = super().__call__(A , add_special_tokens=A , padding=A , truncation=A)['input_ids'] _UpperCAmelCase = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(A , A) ] } if return_attention_mask is not False: _UpperCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) _UpperCAmelCase = attention_mask return self.pad(A , padding=A , max_length=A , return_tensors=A) def _lowerCamelCase ( self : str , A : BatchEncoding , A : DPRReaderOutput , A : int = 16 , A : int = 64 , A : int = 4 , ) -> List[DPRSpanPrediction]: """simple docstring""" _UpperCAmelCase = reader_input['input_ids'] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3] _UpperCAmelCase = len(A) _UpperCAmelCase = sorted(range(A) , reverse=A , key=relevance_logits.__getitem__) _UpperCAmelCase = [] for doc_id in sorted_docs: _UpperCAmelCase = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence _UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _UpperCAmelCase = sequence_ids.index(self.pad_token_id) else: _UpperCAmelCase = len(A) _UpperCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(A) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self : int , A : List[int] , A : List[int] , A : int , A : int , ) -> List[DPRSpanPrediction]: """simple docstring""" _UpperCAmelCase = [] for start_index, start_score in enumerate(A): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) _UpperCAmelCase = sorted(A , key=lambda A: x[1] , reverse=A) _UpperCAmelCase = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]" _UpperCAmelCase = end_index - start_index + 1 assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(A) == top_spans: break return chosen_span_intervals @add_end_docstrings(A ) class __lowerCAmelCase ( A , A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase = ['''input_ids''', '''attention_mask'''] UpperCamelCase = 
DPRReaderTokenizer
339
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
339
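A hypothetical usage sketch for the tool above, assuming the transformers agents stack is installed; the exact import path is an assumption, and the checkpoint is downloaded on first use:

```python
# Import path is an assumption; adjust to where the tool lives in your install.
from transformers.tools import TextSummarizationTool

summarizer = TextSummarizationTool()
long_text = "The meeting covered quarterly results, hiring plans, and a roadmap review. " * 10
print(summarizer(long_text))  # PipelineTool instances are callable end to end
```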
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the
    # corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # Rows are separated by newlines and cells by '#'; the first row is the header.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
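# Hedged smoke-test invocation of the script above. The script file name,
# checkpoint, and hyperparameter values are illustrative assumptions, not
# taken from the source; the flags themselves come from the dataclasses and
# TrainingArguments used above.
import subprocess

subprocess.run(
    [
        "python", "run_tabfact_with_tapex.py",           # assumed file name
        "--model_name_or_path", "microsoft/tapex-base",  # assumed checkpoint
        "--do_train", "--do_eval",
        "--max_seq_length", "1024",
        "--per_device_train_batch_size", "2",
        "--output_dir", "tapex_tabfact_out",
        "--overwrite_output_dir",
    ],
    check=True,
)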
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    # Runs the check program in a sandboxed subprocess and reports whether it passed.
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    # StringIO that raises an exception whenever it is read from.

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    # Disables destructive functions so the executed program cannot interfere
    # with the host (e.g. removing files, spawning processes). Not a security sandbox.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
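# Minimal driver sketch for the sandbox above. The program string is invented
# for illustration; `check_correctness` is the entry point defined in this file.
# Run under a __main__ guard because multiprocessing re-imports the module.
if __name__ == "__main__":
    check_program = (
        "def add(a, b):\n"
        "    return a + b\n"
        "\n"
        "assert add(2, 3) == 5\n"
    )
    outcome = check_correctness(check_program, timeout=3.0, task_id="demo/0", completion_id=0)
    print(outcome)  # e.g. {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}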
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    # Keeps a temporary directory open for the whole class; wipes its contents
    # before each test (when `clear_on_setup` is True) and removes it afterwards.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        # Start each mock on setup and register its stop for teardown.
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    # Runs `command` with `subprocess.check_output`, optionally returning stdout,
    # and wraps any failure in a SubprocessCallException with the captured output.
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
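# A small, hypothetical test built from the helpers above; the launched
# command is illustrative, not from the source. It shows the intended
# composition: TempDirTestCase for isolation, run_command for the process,
# SubprocessCallException raised automatically on a non-zero exit code.
class CLISmokeTest(TempDirTestCase):
    def test_python_invocation(self):
        stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
        self.assertIn("ok", stdout)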
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        # Reset the special tokens to the source language setting.
        # Legacy mode: no prefix, suffix = [eos, src_lang_code].
        # Default mode: prefix = [src_lang_code], suffix = [eos].
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        # Reset the special tokens to the target language setting, mirroring
        # the source-language logic above.
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
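# Hedged usage sketch for the tokenizer above. The checkpoint name comes from
# the vocab maps in this file; the sample sentence is invented, and the exact
# decoded output depends on the real model weights.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

tokenizer = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The cat sat on the mat.", return_tensors="pt")
generated = model.generate(
    **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))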
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # A digit is "safe" if it does not already appear in the row, column,
    # or the 3x3 box containing (row, column).
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    # Find an empty cell (marked 0) so that we can try to assign a digit to it.
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    # Backtracking solver: try each digit in the next empty cell, recurse,
    # and undo the assignment if the branch fails.
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    # Print the grid as 9 rows of space-separated digits.
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
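# Note that the `__main__` block above mutates each grid in place even though
# its comment mentions making a copy. A small sketch of how one might actually
# keep the original grid for comparison:
import copy

grid_copy = copy.deepcopy(initial_grid)
solved = sudoku(grid_copy)
if solved is not None:
    print_solution(solved)        # the solved copy
    print_solution(initial_grid)  # the original, still partially filled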
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Power iteration: finds the dominant eigenvalue and its eigenvector.
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
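# A quick usage sketch of power_iteration on a small symmetric matrix.
# The values are invented for illustration: the eigenvalues of this matrix
# are 3 and 1, so the dominant eigenvalue is 3 with eigenvector [1, 1]/sqrt(2).
m = np.array([[2.0, 1.0], [1.0, 2.0]])
v0 = np.array([1.0, 0.0])
eigen_value, eigen_vector = power_iteration(m, v0)
print(eigen_value)   # ~3.0
print(eigen_vector)  # ~[0.7071, 0.7071], up to sign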
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
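# Hedged usage sketch, added for illustration: the metric above is loaded through the
# legacy `datasets.load_metric` API it is written against (newer code would use the
# standalone `evaluate` package instead). The inputs mirror the docstring example.
import datasets

meteor = datasets.load_metric("meteor")
result = meteor.compute(
    predictions=["It is a guide to action which ensures that the military always obeys the commands of the party"],
    references=["It is a guide to action that ensures that the military will forever heed Party commands"],
)
print(round(result["meteor"], 4))  # 0.6944, as in the docstring example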
339
1
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
339
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
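# Hedged usage sketch, added for illustration: the converter is driven from the command
# line, and the checkpoint name must be either a key of the URL table at the top
# (e.g. "tiny.en") or a local .pt path. The script file name below is hypothetical:
#
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en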
339
1
def A ( _UpperCAmelCase : int = 50 ) -> int: '''simple docstring''' _UpperCAmelCase = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
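# Hedged cross-check sketch, added for illustration. Assuming this is Project Euler 117
# (rows tiled with grey unit squares plus coloured tiles of lengths 2, 3 and 4), the
# count satisfies f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1:
def tiling_ways(length: int = 50) -> int:
    f = [1] + [0] * length
    for n in range(1, length + 1):
        f[n] = sum(f[n - k] for k in (1, 2, 3, 4) if n - k >= 0)
    return f[length]

print(tiling_ways(5))  # 15, the hand-checkable count for a row of five units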
339
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ): UpperCamelCase = None UpperCamelCase = None class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ): UpperCamelCase = datasets.Audio() UpperCamelCase = '''audio''' UpperCamelCase = AudioFolderConfig UpperCamelCase = 42 # definition at the bottom of the script UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' ) UpperCAmelCase__ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCAmelCase__ = AUDIO_EXTENSIONS
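# Hedged usage sketch, added for illustration: this builder backs the "audiofolder"
# loader in `datasets`. A directory of audio files, optionally organised into one
# sub-folder per class label, can be loaded as follows (the data_dir is hypothetical):
from datasets import load_dataset

dataset = load_dataset("audiofolder", data_dir="path/to/audio/folder")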
339
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class __lowerCAmelCase ( A ): def __init__( self : List[str] , *A : int , **A : int) -> None: """simple docstring""" warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , A , ) super().__init__(*A , **A)
339
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
339
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=A ) class __lowerCAmelCase ( A ): UpperCamelCase = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase = Features({'''audio''': Audio()} ) UpperCamelCase = Features({'''labels''': ClassLabel} ) UpperCamelCase = "audio" UpperCamelCase = "labels" def _lowerCamelCase ( self : Optional[Any] , A : List[Any]) -> Optional[Any]: """simple docstring""" if self.label_column not in features: raise ValueError(F"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column] , A): raise ValueError(F"Column {self.label_column} is not a ClassLabel.") _UpperCAmelCase = copy.deepcopy(self) _UpperCAmelCase = self.label_schema.copy() _UpperCAmelCase = features[self.label_column] _UpperCAmelCase = label_schema return task_template @property def _lowerCamelCase ( self : int) -> Dict[str, str]: """simple docstring""" return { self.audio_column: "audio", self.label_column: "labels", }
339
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=5 ) -> List[Any]: '''simple docstring''' # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 _UpperCAmelCase = torch.tensor(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ).unsqueeze(0 ) # Batch size 1 _UpperCAmelCase = model(_UpperCAmelCase )[0] # The last hidden-state is the first element of the output tuple _UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _UpperCAmelCase = logits[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=0 ) _UpperCAmelCase , _UpperCAmelCase = prob.topk(k=_UpperCAmelCase , dim=0 ) _UpperCAmelCase = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_UpperCAmelCase ) )] ) _UpperCAmelCase = tokenizer.mask_token _UpperCAmelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): _UpperCAmelCase = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(_UpperCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(_UpperCAmelCase ) , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(_UpperCAmelCase , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCAmelCase__ = CamembertTokenizer.from_pretrained("camembert-base") UpperCAmelCase__ = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() UpperCAmelCase__ = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
339
1
from __future__ import annotations def A ( _UpperCAmelCase : list[int] ) -> bool: '''simple docstring''' return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
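# Hedged examples, added for illustration: the helper returns True exactly when every
# element of the list is distinct.
print(A([1, 2, 3]))  # True
print(A([1, 2, 2]))  # False, since the set collapses the repeated 2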
339
import math import unittest def A ( _UpperCAmelCase : int ) -> bool: '''simple docstring''' assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(11)) self.assertTrue(is_prime(13)) self.assertTrue(is_prime(17)) self.assertTrue(is_prime(19)) self.assertTrue(is_prime(23)) self.assertTrue(is_prime(29)) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" with self.assertRaises(A): is_prime(-19) self.assertFalse( is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , ) self.assertFalse( is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , ) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
339
1
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __lowerCAmelCase ( A ): UpperCamelCase = 42 UpperCamelCase = jnp.floataa UpperCamelCase = True def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().setup() _UpperCAmelCase = nn.Dense(5 , dtype=self.dtype) def __call__( self : Dict , *A : Union[str, Any] , **A : List[Any]) -> Dict: """simple docstring""" _UpperCAmelCase = super().__call__(*A , **A) _UpperCAmelCase = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class __lowerCAmelCase ( A ): UpperCamelCase = FlaxBigBirdForNaturalQuestionsModule def A ( _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' def cross_entropy(_UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str=None ): _UpperCAmelCase = logits.shape[-1] _UpperCAmelCase = (labels[..., None] == jnp.arange(_UpperCAmelCase )[None]).astype('f4' ) _UpperCAmelCase = jax.nn.log_softmax(_UpperCAmelCase , axis=-1 ) _UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: _UpperCAmelCase = reduction(_UpperCAmelCase ) return loss _UpperCAmelCase = partial(_UpperCAmelCase , reduction=jnp.mean ) _UpperCAmelCase = cross_entropy(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = cross_entropy(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = cross_entropy(_UpperCAmelCase , _UpperCAmelCase ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __lowerCAmelCase : UpperCamelCase = "google/bigbird-roberta-base" UpperCamelCase = 3_0_0_0 UpperCamelCase = 1_0_5_0_0 UpperCamelCase = 1_2_8 UpperCamelCase = 3 UpperCamelCase = 1 UpperCamelCase = 5 # tx_args UpperCamelCase = 3e-5 UpperCamelCase = 0.0 UpperCamelCase = 2_0_0_0_0 UpperCamelCase = 0.0_095 UpperCamelCase = "bigbird-roberta-natural-questions" UpperCamelCase = "training-expt" UpperCamelCase = "data/nq-training.jsonl" UpperCamelCase = "data/nq-validation.jsonl" def _lowerCamelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=A) _UpperCAmelCase = os.path.join(self.base_dir , self.save_dir) _UpperCAmelCase = self.batch_size_per_device * jax.device_count() @dataclass class __lowerCAmelCase : UpperCamelCase = 42 UpperCamelCase = 4_0_9_6 # no dynamic padding on TPUs def __call__( self : List[str] , A : Any) -> str: """simple docstring""" _UpperCAmelCase = self.collate_fn(A) _UpperCAmelCase = jax.tree_util.tree_map(A , A) return batch def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any]) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.fetch_inputs(features['input_ids']) _UpperCAmelCase = { 'input_ids': jnp.array(A , dtype=jnp.intaa), 'attention_mask': jnp.array(A , dtype=jnp.intaa), 'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa), 'end_labels': jnp.array(features['end_token'] , 
dtype=jnp.intaa), 'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa), } return batch def _lowerCamelCase ( self : Tuple , A : list) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = [self._fetch_inputs(A) for ids in input_ids] return zip(*A) def _lowerCamelCase ( self : Optional[Any] , A : list) -> List[Any]: """simple docstring""" _UpperCAmelCase = [1 for _ in range(len(A))] while len(A) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=None ) -> int: '''simple docstring''' if seed is not None: _UpperCAmelCase = dataset.shuffle(seed=_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) // batch_size ): _UpperCAmelCase = dataset[i * batch_size : (i + 1) * batch_size] yield dict(_UpperCAmelCase ) @partial(jax.pmap , axis_name='batch' ) def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any , **_UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' def loss_fn(_UpperCAmelCase : Any ): _UpperCAmelCase = model_inputs.pop('start_labels' ) _UpperCAmelCase = model_inputs.pop('end_labels' ) _UpperCAmelCase = model_inputs.pop('pooled_labels' ) _UpperCAmelCase = state.apply_fn(**_UpperCAmelCase , params=_UpperCAmelCase , dropout_rng=_UpperCAmelCase , train=_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = outputs return state.loss_fn( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) _UpperCAmelCase , _UpperCAmelCase = jax.random.split(_UpperCAmelCase ) _UpperCAmelCase = jax.value_and_grad(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = grad_fn(state.params ) _UpperCAmelCase = jax.lax.pmean({'loss': loss} , axis_name='batch' ) _UpperCAmelCase = jax.lax.pmean(_UpperCAmelCase , 'batch' ) _UpperCAmelCase = state.apply_gradients(grads=_UpperCAmelCase ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='batch' ) def A ( _UpperCAmelCase : str , **_UpperCAmelCase : int ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = model_inputs.pop('start_labels' ) _UpperCAmelCase = model_inputs.pop('end_labels' ) _UpperCAmelCase = model_inputs.pop('pooled_labels' ) _UpperCAmelCase = state.apply_fn(**_UpperCAmelCase , params=state.params , train=_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = outputs _UpperCAmelCase = state.loss_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = jax.lax.pmean({'loss': loss} , axis_name='batch' ) return metrics class __lowerCAmelCase ( train_state.TrainState ): UpperCamelCase = struct.field(pytree_node=A ) @dataclass class __lowerCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = None def _lowerCamelCase ( self : Any , A : List[str] , A : Union[str, Any] , A : str , A : Optional[int]=None) -> Optional[int]: """simple docstring""" _UpperCAmelCase = model.params _UpperCAmelCase = TrainState.create( apply_fn=model.__call__ , params=A , tx=A , loss_fn=A , ) if ckpt_dir is not None: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = restore_checkpoint(A , A) _UpperCAmelCase = { 'lr': args.lr, 'init_lr': args.init_lr, 'warmup_steps': args.warmup_steps, 'num_train_steps': num_train_steps, 'weight_decay': args.weight_decay, } _UpperCAmelCase , _UpperCAmelCase 
= build_tx(**A) _UpperCAmelCase = train_state.TrainState( step=A , apply_fn=model.__call__ , params=A , tx=A , opt_state=A , ) _UpperCAmelCase = args _UpperCAmelCase = data_collator _UpperCAmelCase = lr _UpperCAmelCase = params _UpperCAmelCase = jax_utils.replicate(A) return state def _lowerCamelCase ( self : Optional[Any] , A : Dict , A : Dict , A : Dict) -> str: """simple docstring""" _UpperCAmelCase = self.args _UpperCAmelCase = len(A) // args.batch_size _UpperCAmelCase = jax.random.PRNGKey(0) _UpperCAmelCase = jax.random.split(A , jax.device_count()) for epoch in range(args.max_epochs): _UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa) _UpperCAmelCase = get_batched_dataset(A , args.batch_size , seed=A) _UpperCAmelCase = 0 for batch in tqdm(A , total=A , desc=F"Running EPOCH-{epoch}"): _UpperCAmelCase = self.data_collator(A) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.train_step_fn(A , A , **A) running_loss += jax_utils.unreplicate(metrics['loss']) i += 1 if i % args.logging_steps == 0: _UpperCAmelCase = jax_utils.unreplicate(state.step) _UpperCAmelCase = running_loss.item() / i _UpperCAmelCase = self.scheduler_fn(state_step - 1) _UpperCAmelCase = self.evaluate(A , A) _UpperCAmelCase = { 'step': state_step.item(), 'eval_loss': eval_loss.item(), 'tr_loss': tr_loss, 'lr': lr.item(), } tqdm.write(str(A)) self.logger.log(A , commit=A) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=A) def _lowerCamelCase ( self : int , A : Tuple , A : List[str]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = get_batched_dataset(A , self.args.batch_size) _UpperCAmelCase = len(A) // self.args.batch_size _UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa) _UpperCAmelCase = 0 for batch in tqdm(A , total=A , desc='Evaluating ... '): _UpperCAmelCase = self.data_collator(A) _UpperCAmelCase = self.val_step_fn(A , **A) running_loss += jax_utils.unreplicate(metrics['loss']) i += 1 return running_loss / i def _lowerCamelCase ( self : Optional[Any] , A : List[Any] , A : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = jax_utils.unreplicate(A) print(F"SAVING CHECKPOINT IN {save_dir}" , end=' ... ') self.model_save_fn(A , params=state.params) with open(os.path.join(A , 'opt_state.msgpack') , 'wb') as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(A , 'args.joblib')) joblib.dump(self.data_collator , os.path.join(A , 'data_collator.joblib')) with open(os.path.join(A , 'training_state.json') , 'w') as f: json.dump({'step': state.step.item()} , A) print('DONE') def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' print(F"RESTORING CHECKPOINT FROM {save_dir}" , end=' ... 
' ) with open(os.path.join(_UpperCAmelCase , 'flax_model.msgpack' ) , 'rb' ) as f: _UpperCAmelCase = from_bytes(state.params , f.read() ) with open(os.path.join(_UpperCAmelCase , 'opt_state.msgpack' ) , 'rb' ) as f: _UpperCAmelCase = from_bytes(state.opt_state , f.read() ) _UpperCAmelCase = joblib.load(os.path.join(_UpperCAmelCase , 'args.joblib' ) ) _UpperCAmelCase = joblib.load(os.path.join(_UpperCAmelCase , 'data_collator.joblib' ) ) with open(os.path.join(_UpperCAmelCase , 'training_state.json' ) , 'r' ) as f: _UpperCAmelCase = json.load(_UpperCAmelCase ) _UpperCAmelCase = training_state['step'] print('DONE' ) return params, opt_state, step, args, data_collator def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Any: '''simple docstring''' _UpperCAmelCase = num_train_steps - warmup_steps _UpperCAmelCase = optax.linear_schedule(init_value=_UpperCAmelCase , end_value=_UpperCAmelCase , transition_steps=_UpperCAmelCase ) _UpperCAmelCase = optax.linear_schedule(init_value=_UpperCAmelCase , end_value=1E-7 , transition_steps=_UpperCAmelCase ) _UpperCAmelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' def weight_decay_mask(_UpperCAmelCase : Tuple ): _UpperCAmelCase = traverse_util.flatten_dict(_UpperCAmelCase ) _UpperCAmelCase = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()} return traverse_util.unflatten_dict(_UpperCAmelCase ) _UpperCAmelCase = scheduler_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = optax.adamw(learning_rate=_UpperCAmelCase , weight_decay=_UpperCAmelCase , mask=_UpperCAmelCase ) return tx, lr
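# Hedged mini-example, added for illustration: a standalone JAX rendering of the
# three-way loss above (one-hot cross-entropy on the start, end and pooled heads,
# averaged). All shapes and values here are dummies.
import jax
import jax.numpy as jnp

def cross_entropy_sketch(logits, labels):
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    return jnp.mean(-jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1))

start = end = pooled = jnp.zeros((2, 5))  # batch of 2, 5 classes, uniform logits
labels = jnp.array([1, 3])
loss = (cross_entropy_sketch(start, labels) + cross_entropy_sketch(end, labels) + cross_entropy_sketch(pooled, labels)) / 3
print(loss)  # log(5) ≈ 1.609 for uniform logits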
339
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
339
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class __lowerCAmelCase ( A ): UpperCamelCase = '''ctrl''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[Any] , A : Tuple=24_65_34 , A : str=2_56 , A : Any=12_80 , A : Optional[int]=81_92 , A : Optional[Any]=48 , A : Optional[Any]=16 , A : List[Any]=0.1 , A : Union[str, Any]=0.1 , A : List[str]=1E-6 , A : List[str]=0.0_2 , A : Optional[int]=True , **A : Union[str, Any] , ) -> Any: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = dff _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache super().__init__(**A)
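# Hedged usage sketch, added for illustration: instantiating the upstream `CTRLConfig`
# this snippet mirrors. The attribute map above routes generic names to CTRL-specific
# ones, so `hidden_size` transparently resolves to `n_embd`.
from transformers import CTRLConfig

config = CTRLConfig()      # defaults as above: vocab_size=246534, n_embd=1280, n_layer=48
print(config.hidden_size)  # 1280, via the attribute map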
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
339
1
def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' if divisor % 5 == 0 or divisor % 2 == 0: return 0 _UpperCAmelCase = 1 _UpperCAmelCase = 1 while repunit: _UpperCAmelCase = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def A ( _UpperCAmelCase : int = 1_000_000 ) -> int: '''simple docstring''' _UpperCAmelCase = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(_UpperCAmelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f"""{solution() = }""")
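# Hedged cross-check sketch, added for illustration: the helper above finds the least k
# such that the repunit R(k) = (10**k - 1) // 9 is divisible by n (n coprime to 10).
# A direct modular version for comparison:
def repunit_order(n: int) -> int:
    k, r = 1, 1 % n
    while r != 0:
        r = (10 * r + 1) % n
        k += 1
    return k

print(repunit_order(7))  # 6, since 111111 = 7 * 15873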
339
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''open-llama''' def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( 'use_memorry_efficient_attention' , A) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ' F"got {self.rope_scaling}") _UpperCAmelCase = self.rope_scaling.get('type' , A) _UpperCAmelCase = self.rope_scaling.get('factor' , A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
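# Hedged example, added for illustration: the shape of the `rope_scaling` dict that the
# validator above enforces: a "type" of "linear" or "dynamic" plus a float "factor"
# strictly greater than 1.
rope_scaling = {"type": "linear", "factor": 2.0}  # e.g. stretch RoPE for a 2x longer context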
339
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } UpperCAmelCase__ = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_INIT_CONFIGURATION UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = SqueezeBertTokenizer def __init__( self : List[str] , A : Dict=None , A : int=None , A : List[str]=True , A : Any="[UNK]" , A : int="[SEP]" , A : List[str]="[PAD]" , A : Union[str, Any]="[CLS]" , A : Optional[Any]="[MASK]" , A : Union[str, Any]=True , A : Union[str, Any]=None , **A : str , ) -> List[Any]: """simple docstring""" super().__init__( A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('lowercase' , A) != do_lower_case or normalizer_state.get('strip_accents' , A) != strip_accents or normalizer_state.get('handle_chinese_chars' , A) != tokenize_chinese_chars ): _UpperCAmelCase = getattr(A , normalizer_state.pop('type')) _UpperCAmelCase = do_lower_case _UpperCAmelCase = strip_accents _UpperCAmelCase = tokenize_chinese_chars _UpperCAmelCase = normalizer_class(**A) _UpperCAmelCase = do_lower_case def _lowerCamelCase ( self : List[str] , A : Any , A : List[Any]=None) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self : List[Any] , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCamelCase ( self : Optional[Any] , A : str , A : Optional[str] = None) -> Tuple[str]: """simple docstring""" _UpperCAmelCase = 
self._tokenizer.model.save(A , name=A) return tuple(A)
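# Hedged usage sketch, added for illustration: loading the corresponding upstream fast
# tokenizer by its published checkpoint name.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
print(tokenizer("hello world")["input_ids"])  # token ids with [CLS]/[SEP] added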
339
def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = credit_card_number _UpperCAmelCase = 0 _UpperCAmelCase = len(_UpperCAmelCase ) - 2 for i in range(_UpperCAmelCase , -1 , -2 ): # double the value of every second digit _UpperCAmelCase = int(cc_number[i] ) digit *= 2 # If doubling a number results in a two-digit number, # i.e. greater than 9 (e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single-digit number. if digit > 9: digit %= 10 digit += 1 _UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(F"{error_message} it has nonnumerical characters." ) return False if not 13 <= len(_UpperCAmelCase ) <= 16: print(F"{error_message} of its length." ) return False if not validate_initial_digits(_UpperCAmelCase ): print(F"{error_message} of its first two digits." ) return False if not luhn_validation(_UpperCAmelCase ): print(F"{error_message} it fails the Luhn check." ) return False print(F"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("4111111111111111") validate_credit_card_number("32323")
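# Hedged worked example, added for illustration: Luhn on the test number
# 4111111111111111. Doubling every second digit from the right turns the leading 4 into
# 8 and seven of the 1s into 2s (sum 22); the eight untouched 1s add 8, for a total of
# 30, which is divisible by 10, so the number passes the check.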
339
1
from datetime import datetime as dt import os from github import Github UpperCAmelCase__ = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def A ( ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) _UpperCAmelCase = g.get_repo('huggingface/transformers' ) _UpperCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: _UpperCAmelCase = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=_UpperCAmelCase ) _UpperCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='closed' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
339
from functools import reduce UpperCAmelCase__ = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def A ( _UpperCAmelCase : str = N ) -> int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(_UpperCAmelCase ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
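# Hedged equivalent, added for illustration: a more direct rendering of the reduce-based
# sliding-window product above, assuming the same 1000-digit string N.
import math

def largest_window_product(digits: str, width: int = 13) -> int:
    return max(
        math.prod(int(c) for c in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )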
339
1
from manim import * class __lowerCAmelCase ( A ): def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Rectangle(height=0.5 , width=0.5) _UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0) _UpperCAmelCase = [mem.copy() for i in range(6)] _UpperCAmelCase = [mem.copy() for i in range(6)] _UpperCAmelCase = VGroup(*A).arrange(A , buff=0) _UpperCAmelCase = VGroup(*A).arrange(A , buff=0) _UpperCAmelCase = VGroup(A , A).arrange(A , buff=0) _UpperCAmelCase = Text('CPU' , font_size=24) _UpperCAmelCase = Group(A , A).arrange(A , buff=0.5 , aligned_edge=A) cpu.move_to([-2.5, -0.5, 0]) self.add(A) _UpperCAmelCase = [mem.copy() for i in range(1)] _UpperCAmelCase = VGroup(*A).arrange(A , buff=0) _UpperCAmelCase = Text('GPU' , font_size=24) _UpperCAmelCase = Group(A , A).arrange(A , buff=0.5 , aligned_edge=A) gpu.align_to(A , A) gpu.set_x(gpu.get_x() - 1) self.add(A) _UpperCAmelCase = [mem.copy() for i in range(6)] _UpperCAmelCase = VGroup(*A).arrange(A , buff=0) _UpperCAmelCase = Text('Model' , font_size=24) _UpperCAmelCase = Group(A , A).arrange(A , buff=0.5 , aligned_edge=A) model.move_to([3, -1.0, 0]) self.play( Create(A , run_time=1) , Create(A , run_time=1) , Create(A , run_time=1) , ) _UpperCAmelCase = MarkupText( F"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , ) _UpperCAmelCase = Square(side_length=2.2) key.move_to([-5, 2, 0]) _UpperCAmelCase = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) step_a.move_to([2, 2, 0]) self.play(Write(A , run_time=2.5) , Write(A) , Write(A)) self.add(A) _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for i, rect in enumerate(A): _UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(A , opacity=0.7) cpu_target.move_to(A) cpu_target.generate_target() _UpperCAmelCase = 0.4_6 / 4 _UpperCAmelCase = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=A) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=A , buff=0.0) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=A , buff=0.0) cpu_targs.append(A) first_animations.append(rect.animate(run_time=0.5).set_stroke(A)) second_animations.append(MoveToTarget(A , run_time=1.5)) self.play(*A) self.play(*A) self.wait()
339
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix @ x = vector by Gaussian elimination
    with partial pivoting; return x as a column matrix."""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial passing through
    (1, y_points[0]), (2, y_points[1]), ..."""
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to
    the first k terms of the sequence, for k = 1..order."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"""{solution() = }""")
339
1
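To see the Gaussian-elimination helper from the row above in action, a small hand-checkable system is enough. A usage sketch, assuming the functions keep the names solve and interpolate used in their own bodies:

# Solve x + y = 3, x - y = 1  ->  x = 2, y = 1
matrix = [[1.0, 1.0], [1.0, -1.0]]
vector = [[3.0], [1.0]]
print(solve(matrix, vector))  # [[2.0], [1.0]]

# Fit a line through (1, 1) and (2, 3): f(x) = 2x - 1
f = interpolate([1, 3])
print(f(3))  # 5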
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class __lowerCAmelCase ( A ): def __init__( self : Dict) -> Tuple: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : str , A : str , A : Optional[int] , A : List[Any] , **A : str) -> Optional[Any]: """simple docstring""" self.events.append('on_init_end') def _lowerCamelCase ( self : str , A : int , A : Tuple , A : List[str] , **A : Union[str, Any]) -> int: """simple docstring""" self.events.append('on_train_begin') def _lowerCamelCase ( self : Dict , A : Any , A : Dict , A : Optional[int] , **A : str) -> List[str]: """simple docstring""" self.events.append('on_train_end') def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any] , A : Optional[Any] , A : Any , **A : str) -> Optional[int]: """simple docstring""" self.events.append('on_epoch_begin') def _lowerCamelCase ( self : List[str] , A : Optional[int] , A : Optional[int] , A : int , **A : Optional[int]) -> str: """simple docstring""" self.events.append('on_epoch_end') def _lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Optional[int] , A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" self.events.append('on_step_begin') def _lowerCamelCase ( self : Optional[int] , A : Tuple , A : List[str] , A : int , **A : str) -> Dict: """simple docstring""" self.events.append('on_step_end') def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any] , A : Tuple , A : Optional[int] , **A : Dict) -> Tuple: """simple docstring""" self.events.append('on_evaluate') def _lowerCamelCase ( self : Optional[int] , A : List[str] , A : str , A : str , **A : Optional[int]) -> int: """simple docstring""" self.events.append('on_predict') def _lowerCamelCase ( self : Optional[Any] , A : List[str] , A : List[str] , A : Tuple , **A : Any) -> Union[str, Any]: """simple docstring""" self.events.append('on_save') def _lowerCamelCase ( self : Any , A : Union[str, Any] , A : Any , A : Dict , **A : int) -> Optional[int]: """simple docstring""" self.events.append('on_log') def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any] , A : List[Any] , A : Dict , **A : Optional[Any]) -> List[Any]: """simple docstring""" self.events.append('on_prediction_step') @require_torch class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> List[Any]: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" shutil.rmtree(self.output_dir) def _lowerCamelCase ( self : Any , A : int=0 , A : Tuple=0 , A : str=64 , A : str=64 , A : Optional[Any]=None , A : Tuple=False , **A : str) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = RegressionDataset(length=A) _UpperCAmelCase = RegressionDataset(length=A) _UpperCAmelCase = RegressionModelConfig(a=A , b=A) _UpperCAmelCase = RegressionPreTrainedModel(A) _UpperCAmelCase = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A) return Trainer( A , A , train_dataset=A , eval_dataset=A , callbacks=A , ) def _lowerCamelCase ( self : List[Any] , A : Union[str, Any] 
, A : Dict) -> Optional[int]: """simple docstring""" self.assertEqual(len(A) , len(A)) # Order doesn't matter _UpperCAmelCase = sorted(A , key=lambda A: cb.__name__ if isinstance(A , A) else cb.__class__.__name__) _UpperCAmelCase = sorted(A , key=lambda A: cb.__name__ if isinstance(A , A) else cb.__class__.__name__) for cba, cba in zip(A , A): if isinstance(A , A) and isinstance(A , A): self.assertEqual(A , A) elif isinstance(A , A) and not isinstance(A , A): self.assertEqual(A , cba.__class__) elif not isinstance(A , A) and isinstance(A , A): self.assertEqual(cba.__class__ , A) else: self.assertEqual(A , A) def _lowerCamelCase ( self : int , A : Optional[int]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = ['on_init_end', 'on_train_begin'] _UpperCAmelCase = 0 _UpperCAmelCase = len(trainer.get_eval_dataloader()) _UpperCAmelCase = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs): expected_events.append('on_epoch_begin') for _ in range(A): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log') if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save') expected_events.append('on_epoch_end') if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _lowerCamelCase ( self : Dict) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.get_trainer() _UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A) # Callbacks passed at init are added to the default callbacks _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _UpperCAmelCase = self.get_trainer(disable_tqdm=A) _UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A) def _lowerCamelCase ( self : List[str]) -> List[str]: """simple docstring""" _UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _UpperCAmelCase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A) expected_callbacks.remove(A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) _UpperCAmelCase = self.get_trainer() _UpperCAmelCase = trainer.pop_callback(A) self.assertEqual(cb.__class__ , A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) trainer.add_callback(A) expected_callbacks.insert(0 , A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) # We can also add, pop, or remove by instance _UpperCAmelCase = self.get_trainer() _UpperCAmelCase = trainer.callback_handler.callbacks[0] trainer.remove_callback(A) expected_callbacks.remove(A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) _UpperCAmelCase = self.get_trainer() _UpperCAmelCase = trainer.callback_handler.callbacks[0] _UpperCAmelCase = trainer.pop_callback(A) self.assertEqual(A , A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) trainer.add_callback(A) 
expected_callbacks.insert(0 , A) self.check_callbacks_equality(trainer.callback_handler.callbacks , A) def _lowerCamelCase ( self : List[Any]) -> Any: """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=A) _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) # Independent log/save/eval _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps') trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) _UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch') trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) # A bit of everything _UpperCAmelCase = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() _UpperCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A)) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning') as warn_mock: _UpperCAmelCase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A) in warn_mock.call_args[0][0]
339
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    """Return True if every element of the list occurs exactly once."""
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
1
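The event-recording callback test in this row exercises the Trainer callback protocol by logging every hook it receives. Outside a test, the same pattern gives a lightweight progress logger; a minimal sketch where the hook names are the standard transformers ones and the print body is illustrative:

from transformers import TrainerCallback

class LossPrinterCallback(TrainerCallback):
    # Print the loss every time the Trainer logs metrics.
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss {logs['loss']:.4f}")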
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int: '''simple docstring''' for attribute in key.split('.' ): _UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ) if weight_type is not None: _UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape else: _UpperCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _UpperCAmelCase = value elif weight_type == "weight_g": _UpperCAmelCase = value elif weight_type == "weight_v": _UpperCAmelCase = value elif weight_type == "bias": _UpperCAmelCase = value else: _UpperCAmelCase = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = fairseq_model.state_dict() _UpperCAmelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) _UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): _UpperCAmelCase = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _UpperCAmelCase = True if "*" in mapped_key: _UpperCAmelCase = name.split(_UpperCAmelCase )[0].split('.' 
)[-2] _UpperCAmelCase = mapped_key.replace('*' , _UpperCAmelCase ) if "weight_g" in name: _UpperCAmelCase = 'weight_g' elif "weight_v" in name: _UpperCAmelCase = 'weight_v' elif "weight" in name: _UpperCAmelCase = 'weight' elif "bias" in name: _UpperCAmelCase = 'bias' else: _UpperCAmelCase = None set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) continue if not is_used: unused_weights.append(_UpperCAmelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = full_name.split('conv_layers.' )[-1] _UpperCAmelCase = name.split('.' ) _UpperCAmelCase = int(items[0] ) _UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _UpperCAmelCase = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _UpperCAmelCase = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) _UpperCAmelCase = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) _UpperCAmelCase = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_UpperCAmelCase ) def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCAmelCase = SEWConfig() if is_finetuned: _UpperCAmelCase = model.wav_encoder.wav_model.cfg else: _UpperCAmelCase = model.cfg _UpperCAmelCase = fs_config.conv_bias _UpperCAmelCase = eval(fs_config.conv_feature_layers ) _UpperCAmelCase = [x[0] for x in conv_layers] _UpperCAmelCase = [x[1] for x in conv_layers] _UpperCAmelCase = [x[2] for x in conv_layers] _UpperCAmelCase = 'gelu' _UpperCAmelCase = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' _UpperCAmelCase = 0.0 _UpperCAmelCase = fs_config.activation_fn.name _UpperCAmelCase = fs_config.encoder_embed_dim _UpperCAmelCase = 0.02 _UpperCAmelCase = fs_config.encoder_ffn_embed_dim _UpperCAmelCase = 1E-5 _UpperCAmelCase = fs_config.encoder_layerdrop _UpperCAmelCase = fs_config.encoder_attention_heads _UpperCAmelCase = fs_config.conv_pos_groups _UpperCAmelCase = fs_config.conv_pos _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = fs_config.encoder_layers _UpperCAmelCase = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _UpperCAmelCase = model.cfg _UpperCAmelCase = fs_config.final_dropout _UpperCAmelCase = fs_config.layerdrop _UpperCAmelCase = fs_config.activation_dropout _UpperCAmelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _UpperCAmelCase = fs_config.attention_dropout _UpperCAmelCase = fs_config.dropout_input _UpperCAmelCase = fs_config.dropout _UpperCAmelCase = fs_config.mask_channel_length _UpperCAmelCase = fs_config.mask_channel_prob _UpperCAmelCase = fs_config.mask_length _UpperCAmelCase = fs_config.mask_prob _UpperCAmelCase = 'Wav2Vec2FeatureExtractor' _UpperCAmelCase = 'Wav2Vec2CTCTokenizer' return config @torch.no_grad() def A ( _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=True ) -> str: '''simple docstring''' if is_finetuned: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _UpperCAmelCase = SEWConfig.from_pretrained(_UpperCAmelCase ) else: _UpperCAmelCase = convert_config(model[0] , _UpperCAmelCase ) _UpperCAmelCase = model[0].eval() _UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) if is_finetuned: if dict_path: _UpperCAmelCase = Dictionary.load(_UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.eos_index _UpperCAmelCase = len(target_dict.symbols ) _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'vocab.json' ) if not os.path.isdir(_UpperCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) ) return os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with open(_UpperCAmelCase , 'w' 
, encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , _UpperCAmelCase ) _UpperCAmelCase = WavaVecaCTCTokenizer( _UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , ) _UpperCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) _UpperCAmelCase = SEWForCTC(_UpperCAmelCase ) else: _UpperCAmelCase = SEWModel(_UpperCAmelCase ) feature_extractor.save_pretrained(_UpperCAmelCase ) recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) hf_model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCAmelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
339
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman-numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman-numeral representation of an integer."""
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: count the characters saved by rewriting each roman
    numeral from the input file in its minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"""{solution() = }""")
339
1
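The SEW conversion script in this row is built around one idea: walk the fairseq state dict and translate each key through a MAPPING table before copying the tensor. The core of that pattern, reduced to a hedged sketch; the mapping entries here are placeholders, not the full SEW table:

mapping = {"encoder.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head"}

def rename_state_dict(state_dict: dict) -> dict:
    # Replace any matching prefix from the mapping; keep other keys as-is.
    renamed = {}
    for key, value in state_dict.items():
        for old, new in mapping.items():
            if key.startswith(old):
                key = new + key[len(old):]
                break
        renamed[key] = value
    return renamed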
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
339
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count link text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
1
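The MT5 __init__ in this row defers heavy imports through transformers' _LazyModule, so importing the package stays cheap until a symbol is actually touched. The same effect is available in plain Python through a module-level __getattr__ (PEP 562); a minimal sketch that assumes it lives inside a package, with an illustrative symbol table:

import importlib

_LAZY = {"MT5Model": ".modeling_mt5"}  # symbol -> owning submodule (illustrative)

def __getattr__(name):
    # Import the owning submodule only on first attribute access.
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")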
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, 
FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
339
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
1
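Both files in this row lean on the same guard: probe for a backend at import time and fall back to dummy objects when it is missing, so the package still imports cleanly. A stripped-down, self-contained sketch of that try/except pattern; the names here are generic, not the transformers internals:

try:
    import torch  # noqa: F401
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False

if TORCH_AVAILABLE:
    def make_tensor(data):
        return torch.tensor(data)
else:
    def make_tensor(data):
        # Dummy that fails loudly only when actually called.
        raise ImportError("make_tensor requires torch; pip install torch")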
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" _UpperCAmelCase = BitConfig( conv_layer=_UpperCAmelCase , num_labels=1_000 , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase , ) return config def A ( _UpperCAmelCase : Union[str, Any] ) -> str: '''simple docstring''' if "stem.conv" in name: _UpperCAmelCase = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: _UpperCAmelCase = name.replace('blocks' , 'layers' ) if "head.fc" in name: _UpperCAmelCase = name.replace('head.fc' , 'classifier.1' ) if name.startswith('norm' ): _UpperCAmelCase = 'bit.' + name if "bit" not in name and "classifier" not in name: _UpperCAmelCase = 'bit.encoder.' 
+ name return name def A ( ) -> int: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=False ) -> Tuple: '''simple docstring''' _UpperCAmelCase = get_config(_UpperCAmelCase ) # load original model from timm _UpperCAmelCase = create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ) timm_model.eval() # load state_dict of original model _UpperCAmelCase = timm_model.state_dict() for key in state_dict.copy().keys(): _UpperCAmelCase = state_dict.pop(_UpperCAmelCase ) _UpperCAmelCase = val.squeeze() if 'head' in key else val # load HuggingFace model _UpperCAmelCase = BitForImageClassification(_UpperCAmelCase ) model.eval() model.load_state_dict(_UpperCAmelCase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} , model=_UpperCAmelCase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } _UpperCAmelCase = BitImageProcessor( do_resize=_UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(_UpperCAmelCase ).unsqueeze(0 ) _UpperCAmelCase = processor(_UpperCAmelCase , return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(_UpperCAmelCase ) _UpperCAmelCase = outputs.logits print('Logits:' , logits[0, :3] ) print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] ) _UpperCAmelCase = timm_model(_UpperCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_UpperCAmelCase , outputs.logits , atol=1E-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: print(F"Pushing model {model_name} and processor to the hub" ) model.push_to_hub(F"ybelkada/{model_name}" ) processor.push_to_hub(F"ybelkada/{model_name}" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) UpperCAmelCase__ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
339
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Project Euler 191: count attendance strings of the given length with
    fewer than two absences and no run of three consecutive late days."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
339
1
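The prize-string counter above memoizes by hand through a module-level dict. functools gives the same memoization for free when the state is confined to the arguments; a sketch of the equivalent cached recursion (the function name is illustrative):

from functools import lru_cache

@lru_cache(maxsize=None)
def count_prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    # Same rules: at most one absence in total, never three lates in a row.
    if absent == 2 or late == 3:
        return 0
    if days == 0:
        return 1
    return (
        count_prize_strings(days - 1, absent, late + 1)  # late today
        + count_prize_strings(days - 1, absent + 1, 0)   # absent today
        + count_prize_strings(days - 1, absent, 0)       # on time today
    )

print(count_prize_strings(4))  # 43, the figure quoted in Project Euler 191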
from random import shuffle import tensorflow as tf from numpy import array def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = int(_UpperCAmelCase ) assert noofclusters < len(_UpperCAmelCase ) # Find out the dimensionality _UpperCAmelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors _UpperCAmelCase = list(range(len(_UpperCAmelCase ) ) ) shuffle(_UpperCAmelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _UpperCAmelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _UpperCAmelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _UpperCAmelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(_UpperCAmelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values _UpperCAmelCase = tf.placeholder('float64' , [dim] ) _UpperCAmelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _UpperCAmelCase = [tf.Variable(0 ) for i in range(len(_UpperCAmelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value _UpperCAmelCase = tf.placeholder('int32' ) _UpperCAmelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _UpperCAmelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _UpperCAmelCase = tf.reduce_mean(_UpperCAmelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input _UpperCAmelCase = tf.placeholder('float' , [dim] ) _UpperCAmelCase = tf.placeholder('float' , [dim] ) _UpperCAmelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_UpperCAmelCase , _UpperCAmelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _UpperCAmelCase = tf.placeholder('float' , [noofclusters] ) _UpperCAmelCase = tf.argmin(_UpperCAmelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _UpperCAmelCase = tf.initialize_all_variables() # Initialize all variables sess.run(_UpperCAmelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. _UpperCAmelCase = 100 for _ in range(_UpperCAmelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(_UpperCAmelCase ) ): _UpperCAmelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _UpperCAmelCase = [ sess.run(_UpperCAmelCase , feed_dict={va: vect, va: sess.run(_UpperCAmelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _UpperCAmelCase = sess.run( _UpperCAmelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(_UpperCAmelCase ): # Collect all the vectors assigned to this cluster _UpperCAmelCase = [ vectors[i] for i in range(len(_UpperCAmelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _UpperCAmelCase = sess.run( _UpperCAmelCase , feed_dict={mean_input: array(_UpperCAmelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _UpperCAmelCase = sess.run(_UpperCAmelCase ) _UpperCAmelCase = sess.run(_UpperCAmelCase ) return centroids, assignments
339
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
1
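The TensorFlow-v1 graph in this row implements plain Lloyd's algorithm: assign each vector to its nearest centroid, then move each centroid to the mean of its members. The same two steps fit in a few lines of NumPy; this is a hedged modern equivalent, not a port of the session code:

import numpy as np

def kmeans(vectors, k: int, iterations: int = 100):
    """Plain Lloyd's algorithm; returns (centroids, assignments)."""
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].copy()
    for _ in range(iterations):
        # Expectation step: assign each vector to its nearest centroid.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization step: move each non-empty cluster to its members' mean.
        for cluster in range(k):
            members = vectors[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments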
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """A graph whose edges carry transition probabilities for a random walk."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """Run a random walk of the given length and count visits per node."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
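A quick walk through the Markov-chain class above, using the names from the restored version: three nodes whose outgoing probabilities each sum to 1, and a visit count after a short random walk. The printed counts are run-dependent and purely illustrative:

transitions = [
    ("a", "a", 0.9), ("a", "b", 0.1),
    ("b", "b", 0.5), ("b", "c", 0.5),
    ("c", "c", 0.7), ("c", "a", 0.3),
]
counts = get_transitions("a", transitions, steps=1000)
print(counts)  # e.g. Counter({'a': 513, 'c': 252, 'b': 236}), varies per run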
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
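# A minimal standalone sketch of the `_convert_table_text_to_pandas` step above:
# tab_fact stores each table as one string with rows separated by "\n" and cells
# separated by "#". The sample string below is illustrative, not taken from the dataset.
import pandas as pd

def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # The first row carries the column headers; the remaining rows are records.
    rows = [row.split('#') for row in table_text.strip('\n').split('\n')]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

print(convert_table_text_to_pandas('city#population\nparis#2.1m\nberlin#3.6m'))
# prints a two-row DataFrame with columns "city" and "population"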
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = UnCLIPImageVariationPipeline UpperCamelCase = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''} UpperCamelCase = IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase = [ '''generator''', '''return_dict''', '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] UpperCamelCase = False @property def _lowerCamelCase ( self : int) -> Optional[int]: """simple docstring""" return 32 @property def _lowerCamelCase ( self : str) -> Optional[Any]: """simple docstring""" return 32 @property def _lowerCamelCase ( self : str) -> int: """simple docstring""" return self.time_input_dim @property def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" return self.time_input_dim * 4 @property def _lowerCamelCase ( self : Optional[int]) -> int: """simple docstring""" return 1_00 @property def _lowerCamelCase ( self : Tuple) -> str: """simple docstring""" _UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def _lowerCamelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" torch.manual_seed(0) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(A) @property def _lowerCamelCase ( self : Optional[int]) -> List[str]: """simple docstring""" torch.manual_seed(0) _UpperCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(A) @property def _lowerCamelCase ( self : List[str]) -> Optional[int]: """simple docstring""" torch.manual_seed(0) _UpperCAmelCase = { 'clip_embeddings_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'cross_attention_dim': self.cross_attention_dim, } _UpperCAmelCase = UnCLIPTextProjModel(**A) return model @property def _lowerCamelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) _UpperCAmelCase = { 'sample_size': 32, # RGB in channels 'in_channels': 3, # Out channels is double in channels because predicts mean and variance 'out_channels': 6, 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': 
(self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': 'identity', } _UpperCAmelCase = UNetaDConditionModel(**A) return model @property def _lowerCamelCase ( self : str) -> int: """simple docstring""" return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def _lowerCamelCase ( self : Tuple) -> Optional[int]: """simple docstring""" torch.manual_seed(0) _UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs) return model @property def _lowerCamelCase ( self : str) -> int: """simple docstring""" torch.manual_seed(1) _UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs) return model def _lowerCamelCase ( self : str) -> str: """simple docstring""" _UpperCAmelCase = self.dummy_decoder _UpperCAmelCase = self.dummy_text_proj _UpperCAmelCase = self.dummy_text_encoder _UpperCAmelCase = self.dummy_tokenizer _UpperCAmelCase = self.dummy_super_res_first _UpperCAmelCase = self.dummy_super_res_last _UpperCAmelCase = UnCLIPScheduler( variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , ) _UpperCAmelCase = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , ) _UpperCAmelCase = CLIPImageProcessor(crop_size=32 , size=32) _UpperCAmelCase = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def _lowerCamelCase ( self : str , A : Dict , A : int=0 , A : Tuple=True) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A) if str(A).startswith('mps'): _UpperCAmelCase = torch.manual_seed(A) else: _UpperCAmelCase = torch.Generator(device=A).manual_seed(A) if pil_image: _UpperCAmelCase = input_image * 0.5 + 0.5 _UpperCAmelCase = input_image.clamp(0 , 1) _UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy() _UpperCAmelCase = DiffusionPipeline.numpy_to_pil(A)[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def _lowerCamelCase ( self : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**A) _UpperCAmelCase = pipe.to(A) pipe.set_progress_bar_config(disable=A) _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = pipe(**A) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = pipe( **A , return_dict=A , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array( [ 0.9_9_9_7, 0.0_0_0_2, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_6_9, 0.0_0_2_3, 0.9_9_9_7, 0.9_9_6_9, 0.9_9_7_0, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : Optional[int]) -> int: """simple docstring""" _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**A) _UpperCAmelCase = pipe.to(A) pipe.set_progress_bar_config(disable=A) _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = pipe(**A) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = pipe( **A , return_dict=A , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**A) _UpperCAmelCase = pipe.to(A) pipe.set_progress_bar_config(disable=A) _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = [ pipeline_inputs['image'], pipeline_inputs['image'], ] _UpperCAmelCase = pipe(**A) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = [ tuple_pipeline_inputs['image'], tuple_pipeline_inputs['image'], ] _UpperCAmelCase = pipe( **A , return_dict=A , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) _UpperCAmelCase = np.array( [ 0.9_9_9_7, 0.9_9_8_9, 0.0_0_0_8, 0.0_0_2_1, 0.9_9_6_0, 0.0_0_1_8, 0.0_0_1_4, 0.0_0_0_2, 0.9_9_3_3, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = torch.device('cpu') class __lowerCAmelCase : UpperCamelCase = 1 _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**A) _UpperCAmelCase = pipe.to(A) pipe.set_progress_bar_config(disable=A) _UpperCAmelCase = torch.Generator(device=A).manual_seed(0) _UpperCAmelCase = pipe.decoder.dtype _UpperCAmelCase = 1 _UpperCAmelCase = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) _UpperCAmelCase = pipe.prepare_latents( A , dtype=A , device=A , generator=A , latents=A , scheduler=DummyScheduler()) _UpperCAmelCase = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) _UpperCAmelCase = pipe.prepare_latents( A , dtype=A , device=A , generator=A , latents=A , scheduler=DummyScheduler()) _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) _UpperCAmelCase = pipe( **A , decoder_latents=A , super_res_latents=A).images _UpperCAmelCase = self.get_dummy_inputs(A , pil_image=A) # Don't pass image, instead pass embedding _UpperCAmelCase = pipeline_inputs.pop('image') _UpperCAmelCase = pipe.image_encoder(A).image_embeds _UpperCAmelCase = pipe( **A , decoder_latents=A , super_res_latents=A , image_embeddings=A , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a).max() < 1E-4 @skip_mps def 
_lowerCamelCase ( self : Optional[Any]) -> Any: """simple docstring""" _UpperCAmelCase = torch_device == 'cpu' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor _UpperCAmelCase = 1E-2 self._test_attention_slicing_forward_pass( test_max_difference=A , expected_max_diff=A) @skip_mps def _lowerCamelCase ( self : Any) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = torch_device == 'cpu' _UpperCAmelCase = True _UpperCAmelCase = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] self._test_inference_batch_single_identical( test_max_difference=A , relax_max_difference=A , additional_params_copy_to_batched_inputs=A , ) def _lowerCamelCase ( self : str) -> Tuple: """simple docstring""" _UpperCAmelCase = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes _UpperCAmelCase = [2, 3] self._test_inference_batch_consistent( batch_sizes=A , additional_params_copy_to_batched_inputs=A , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=A) @skip_mps def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def _lowerCamelCase ( self : Dict) -> List[str]: """simple docstring""" return super().test_save_load_local() @skip_mps def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" return super().test_save_load_optional_components() @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Any) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : Tuple) -> List[Any]: """simple docstring""" _UpperCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png') _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/unclip/karlo_v1_alpha_cat_variation_fp16.npy') _UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained( 'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa) _UpperCAmelCase = pipeline.to(A) pipeline.set_progress_bar_config(disable=A) _UpperCAmelCase = torch.Generator(device='cpu').manual_seed(0) _UpperCAmelCase = pipeline( A , generator=A , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) assert_mean_pixel_difference(A , A , 15)
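# The assertions in these tests all follow one pattern: take the bottom-right
# 3x3 patch of the last channel and compare it against nine reference values
# recorded once for a fixed seed. A self-contained sketch of that check on
# synthetic data (the arrays below are stand-ins, not real pipeline output):
import numpy as np

image = np.ones((1, 64, 64, 3), dtype=np.float32)    # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]                 # bottom-right 3x3 patch, last channel
expected_slice = np.ones(9, dtype=np.float32)        # reference values for this seed
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2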
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
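# The time-limit context manager above relies on Unix interval timers; a runnable
# standalone sketch of the same pattern (main thread only, not available on Windows):
import contextlib
import signal

class TimeoutException(Exception):
    pass

@contextlib.contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise TimeoutException('Timed out!')
    signal.setitimer(signal.ITIMER_REAL, seconds)   # arm a real-time alarm
    signal.signal(signal.SIGALRM, handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)     # always disarm on exit

with time_limit(1.0):
    pass  # any checked program must finish within one second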
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(100, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
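# The RUN_SLOW gating above boils down to one env-flag helper; a standalone sketch
# (`strtobool` lives in the deprecated distutils module, kept here to match the code above):
import os
from distutils.util import strtobool

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        return default                      # unset -> fall back to the default
    try:
        return bool(strtobool(value))       # accepts "yes"/"no", "1"/"0", "true"/"false", ...
    except ValueError:
        raise ValueError(f'If set, {key} must be yes or no.')

os.environ['RUN_SLOW'] = 'yes'
assert parse_flag_from_env('RUN_SLOW') is True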
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase__ = { "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["ChineseCLIPFeatureExtractor"] UpperCAmelCase__ = ["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
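# A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562
# module-level __getattr__; the real transformers class also handles TYPE_CHECKING,
# optional dependencies, and module specs. The `_demo_import_structure` below is
# illustrative, not the real import structure.
import importlib
import sys

_demo_import_structure = {'json': ['dumps', 'loads']}   # module -> exported names

def __getattr__(name):
    for module_name, exports in _demo_import_structure.items():
        if name in exports:
            module = importlib.import_module(module_name)   # imported only on first access
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')

print(sys.modules[__name__].dumps({'a': 1}))   # attribute access triggers the lazy import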
from __future__ import annotations UpperCAmelCase__ = list[list[int]] # assigning initial values to the grid UpperCAmelCase__ = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase__ = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool: '''simple docstring''' for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def A ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: '''simple docstring''' for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def A ( _UpperCAmelCase : Matrix ) -> Matrix | None: '''simple docstring''' if location := find_empty_location(_UpperCAmelCase ): _UpperCAmelCase , _UpperCAmelCase = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): _UpperCAmelCase = digit if sudoku(_UpperCAmelCase ) is not None: return grid _UpperCAmelCase = 0 return None def A ( _UpperCAmelCase : Matrix ) -> None: '''simple docstring''' for row in grid: for cell in row: print(_UpperCAmelCase , end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("\nExample grid:\n" + "=" * 20) print_solution(example_grid) print("\nExample grid solution:") UpperCAmelCase__ = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("Cannot find a solution.")
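# The 3x3 box test in `is_safe` depends on (row - row % 3, column - column % 3)
# being the top-left corner of the cell's box; a quick illustration:
for row, column in [(0, 0), (4, 7), (8, 2)]:
    box_row, box_col = row - row % 3, column - column % 3
    print(f'cell ({row},{column}) lives in the 3x3 box starting at ({box_row},{box_col})')
# (0,0) -> (0,0), (4,7) -> (3,6), (8,2) -> (6,0)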
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
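# `rename_keys` above renames by substring replacement over every state-dict key;
# a standalone sketch of the same loop on a toy state dict, using a small subset
# of WHISPER_MAPPING (values are placeholders for real tensors):
mapping = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    '.attn.query': '.self_attn.q_proj',
}
state_dict = {
    'encoder.blocks.0.mlp.0.weight': 1,
    'decoder.blocks.3.attn.query.bias': 2,
}
for key in list(state_dict):
    new_key = key
    for old, new in mapping.items():
        if old in key:
            new_key = new_key.replace(old, new)
    state_dict[new_key] = state_dict.pop(key)
print(sorted(state_dict))
# ['decoder.layers.3.self_attn.q_proj.bias', 'encoder.layers.0.fc1.weight']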
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
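# The metric above wraps nltk; a minimal usage sketch of the underlying call
# (nltk >= 3.6.5 expects pre-tokenized input, matching the word_tokenize branch
# above, and the downloads mirror the resource-fetching step):
import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download('wordnet')
nltk.download('punkt')

reference = 'It is a guide to action that ensures that the military will forever heed Party commands'
prediction = 'It is a guide to action which ensures that the military always obeys the commands of the party'
score = meteor_score.single_meteor_score(word_tokenize(reference), word_tokenize(prediction))
print(round(score, 4))   # ~0.69 with the default alpha=0.9, beta=3, gamma=0.5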
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCAmelCase__ = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
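# Illustrative follow-up (not part of the original script): once the converter has
# written a dump folder, it should load like any Hugging Face checkpoint. The folder
# path below is hypothetical.
from transformers import WhisperForConditionalGeneration

converted = WhisperForConditionalGeneration.from_pretrained("./whisper-tiny-converted")
print(sum(p.numel() for p in converted.parameters()), "parameters loaded")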
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''xmod''' def __init__( self : Optional[Any] , A : str=3_05_22 , A : List[Any]=7_68 , A : List[str]=12 , A : Union[str, Any]=12 , A : List[Any]=30_72 , A : Tuple="gelu" , A : List[Any]=0.1 , A : Union[str, Any]=0.1 , A : Any=5_12 , A : Tuple=2 , A : Any=0.0_2 , A : Tuple=1E-12 , A : List[str]=1 , A : Any=0 , A : Optional[Any]=2 , A : List[str]="absolute" , A : Union[str, Any]=True , A : int=None , A : Optional[int]=False , A : Optional[int]=2 , A : Tuple=False , A : int=True , A : str=True , A : Tuple=("en_XX",) , A : Optional[Any]=None , **A : List[str] , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout _UpperCAmelCase = pre_norm _UpperCAmelCase = adapter_reduction_factor _UpperCAmelCase = adapter_layer_norm _UpperCAmelCase = adapter_reuse_layer_norm _UpperCAmelCase = ln_before_adapter _UpperCAmelCase = list(A) _UpperCAmelCase = default_language class __lowerCAmelCase ( A ): @property def _lowerCamelCase ( self : Any) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ])
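# Minimal sketch (illustrative, not from the original file): the config above extends
# RoBERTa-style arguments with X-MOD adapter fields. `XmodConfig` is the presumed
# public name of the class shown above under an obfuscated name.
from transformers import XmodConfig

config = XmodConfig(languages=("en_XX", "de_DE"), adapter_reduction_factor=2)
print(config.model_type, config.languages, config.adapter_reduction_factor)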
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ): UpperCamelCase = None UpperCamelCase = None class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ): UpperCamelCase = datasets.Audio() UpperCamelCase = '''audio''' UpperCamelCase = AudioFolderConfig UpperCamelCase = 42 # definition at the bottom of the script UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' ) UpperCAmelCase__ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCAmelCase__ = AUDIO_EXTENSIONS
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def A ( _UpperCAmelCase : NDArray[floataa] , _UpperCAmelCase : NDArray[floataa] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , ) -> list[float]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = coefficient_matrix.shape _UpperCAmelCase , _UpperCAmelCase = constant_matrix.shape if rowsa != colsa: _UpperCAmelCase = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}" raise ValueError(_UpperCAmelCase ) if colsa != 1: _UpperCAmelCase = F"Constant matrix must be nx1 but received {rowsa}x{colsa}" raise ValueError(_UpperCAmelCase ) if rowsa != rowsa: _UpperCAmelCase = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' F"received {rowsa}x{colsa} and {rowsa}x{colsa}" ) raise ValueError(_UpperCAmelCase ) if len(_UpperCAmelCase ) != rowsa: _UpperCAmelCase = ( 'Number of initial values must be equal to number of rows in coefficient ' F"matrix but received {len(_UpperCAmelCase )} and {rowsa}" ) raise ValueError(_UpperCAmelCase ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) _UpperCAmelCase = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) _UpperCAmelCase , _UpperCAmelCase = table.shape strictly_diagonally_dominant(_UpperCAmelCase ) # Iterates the whole matrix for given number of times for _ in range(_UpperCAmelCase ): _UpperCAmelCase = [] for row in range(_UpperCAmelCase ): _UpperCAmelCase = 0 for col in range(_UpperCAmelCase ): if col == row: _UpperCAmelCase = table[row][col] elif col == cols - 1: _UpperCAmelCase = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] _UpperCAmelCase = (temp + val) / denom new_val.append(_UpperCAmelCase ) _UpperCAmelCase = new_val return [float(_UpperCAmelCase ) for i in new_val] def A ( _UpperCAmelCase : NDArray[floataa] ) -> bool: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = table.shape _UpperCAmelCase = True for i in range(0 , _UpperCAmelCase ): _UpperCAmelCase = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
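# Worked example (illustrative): a strictly diagonally dominant 3x3 system solved with
# the iteration above. `jacobi_iteration_method` is the presumed original name of the
# first function in this file, which appears above under an obfuscated name.
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0, 0.0], 50))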
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def A ( ) -> None: '''simple docstring''' print('Making key files...' ) make_key_files('rsa' , 1_024 ) print('Key files generation successful.' ) def A ( _UpperCAmelCase : int ) -> tuple[tuple[int, int], tuple[int, int]]: '''simple docstring''' print('Generating prime p...' ) _UpperCAmelCase = rabinMiller.generate_large_prime(_UpperCAmelCase ) print('Generating prime q...' ) _UpperCAmelCase = rabinMiller.generate_large_prime(_UpperCAmelCase ) _UpperCAmelCase = p * q print('Generating e that is relatively prime to (p - 1) * (q - 1)...' ) while True: _UpperCAmelCase = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_UpperCAmelCase , (p - 1) * (q - 1) ) == 1: break print('Calculating d that is mod inverse of e...' ) _UpperCAmelCase = cryptoMath.find_mod_inverse(_UpperCAmelCase , (p - 1) * (q - 1) ) _UpperCAmelCase = (n, e) _UpperCAmelCase = (n, d) return (public_key, private_key) def A ( _UpperCAmelCase : str , _UpperCAmelCase : int ) -> None: '''simple docstring''' if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ): print('\nWARNING:' ) print( F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n" 'Use a different name or delete these files and re-run this program.' ) sys.exit() _UpperCAmelCase , _UpperCAmelCase = generate_key(_UpperCAmelCase ) print(F"\nWriting public key to file {name}_pubkey.txt..." ) with open(F"{name}_pubkey.txt" , 'w' ) as out_file: out_file.write(F"{key_size},{public_key[0]},{public_key[1]}" ) print(F"Writing private key to file {name}_privkey.txt..." ) with open(F"{name}_privkey.txt" , 'w' ) as out_file: out_file.write(F"{key_size},{private_key[0]},{private_key[1]}" ) if __name__ == "__main__": main()
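# Sanity-check sketch (illustrative, not from the original file): any key pair
# produced above must satisfy e * d mod ((p - 1) * (q - 1)) == 1. A toy check with
# fixed small primes, bypassing the random key-size generator:
p, q = 61, 53
phi = (p - 1) * (q - 1)
e = 17
d = pow(e, -1, phi)  # Python 3.8+ modular inverse, standing in for find_mod_inverse
assert (e * d) % phi == 1
print((p * q, e), (p * q, d))  # a toy public/private key pair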
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=5 ) -> List[Any]: '''simple docstring''' # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 _UpperCAmelCase = torch.tensor(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ).unsqueeze(0 ) # Batch size 1 _UpperCAmelCase = model(_UpperCAmelCase )[0] # The last hidden-state is the first element of the output tuple _UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _UpperCAmelCase = logits[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=0 ) _UpperCAmelCase , _UpperCAmelCase = prob.topk(k=_UpperCAmelCase , dim=0 ) _UpperCAmelCase = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_UpperCAmelCase ) )] ) _UpperCAmelCase = tokenizer.mask_token _UpperCAmelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): _UpperCAmelCase = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(_UpperCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(_UpperCAmelCase ) , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(_UpperCAmelCase , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCAmelCase__ = CamembertTokenizer.from_pretrained("camembert-base") UpperCAmelCase__ = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() UpperCAmelCase__ = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
def A ( ) -> Dict: '''simple docstring''' _UpperCAmelCase = 0 for i in range(1 , 1_001 ): total += i**i return str(_UpperCAmelCase )[-10:] if __name__ == "__main__": print(solution())
import math import unittest def A ( _UpperCAmelCase : int ) -> bool: '''simple docstring''' assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(11)) self.assertTrue(is_prime(13)) self.assertTrue(is_prime(17)) self.assertTrue(is_prime(19)) self.assertTrue(is_prime(23)) self.assertTrue(is_prime(29)) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" with self.assertRaises(A): is_prime(-19) self.assertFalse( is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , ) self.assertFalse( is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , ) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class __lowerCAmelCase ( A ): def __init__( self : Tuple , *A : List[Any] , **A : Tuple) -> None: """simple docstring""" warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , A , ) super().__init__(*A , **A)
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = CodeGenTokenizer UpperCamelCase = CodeGenTokenizerFast UpperCamelCase = True UpperCamelCase = {'''add_prefix_space''': True} UpperCamelCase = False def _lowerCamelCase ( self : str) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] _UpperCAmelCase = dict(zip(A , range(len(A)))) _UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(A) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(A)) def _lowerCamelCase ( self : Optional[Any] , **A : Union[str, Any]) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : int , **A : str) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : List[str] , A : Tuple) -> List[Any]: """simple docstring""" _UpperCAmelCase = 'lower newer' _UpperCAmelCase = 'lower newer' return input_text, output_text def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" _UpperCAmelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] _UpperCAmelCase = tokenizer.tokenize(A , add_prefix_space=A) self.assertListEqual(A , A) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , A) def _lowerCamelCase ( self : Optional[int]) -> Dict: """simple docstring""" if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer(add_prefix_space=A) _UpperCAmelCase = 'lower newer' # Testing tokenization _UpperCAmelCase = tokenizer.tokenize(A , add_prefix_space=A) _UpperCAmelCase = rust_tokenizer.tokenize(A) self.assertListEqual(A , A) # Testing conversion to ids without special tokens _UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A) _UpperCAmelCase = rust_tokenizer.encode(A , add_special_tokens=A) self.assertListEqual(A , A) # Testing conversion to ids with special tokens _UpperCAmelCase = self.get_rust_tokenizer(add_prefix_space=A) _UpperCAmelCase = tokenizer.encode(A , add_prefix_space=A) _UpperCAmelCase = rust_tokenizer.encode(A) self.assertListEqual(A , A) # Testing the unknown token _UpperCAmelCase = tokens + [rust_tokenizer.unk_token] _UpperCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A) , A) def _lowerCamelCase ( self : int , *A : int , **A : List[Any]) -> Any: """simple docstring""" pass def _lowerCamelCase ( self : Any , A : Optional[Any]=15) -> Any: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A) # Simple input _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase = ('This is a simple input', 'This is a pair') _UpperCAmelCase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length') # Simple input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length') # Simple input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , ) # Pair input self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length') # Pair input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length') # Pair input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , ) def _lowerCamelCase ( self : Tuple) -> List[str]: """simple docstring""" _UpperCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>') # Simple input _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input looooooooong', 'This is a simple input'] _UpperCAmelCase = ('This is a simple input', 'This is a pair') _UpperCAmelCase = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] _UpperCAmelCase = tokenizer.pad_token_id _UpperCAmelCase = tokenizer(A , padding='max_length' , max_length=30 , return_tensors='np') _UpperCAmelCase = tokenizer(A , padding=A , truncate=A , return_tensors='np') _UpperCAmelCase = tokenizer(*A , padding='max_length' , max_length=60 , return_tensors='np') _UpperCAmelCase = tokenizer(A , padding=A , truncate=A , return_tensors='np') # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30) self.assertTrue(pad_token_id in out_s['input_ids']) self.assertTrue(0 in out_s['attention_mask']) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0]) self.assertFalse(0 in out_sa['attention_mask'][0]) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1]) self.assertTrue(0 in out_sa['attention_mask'][1]) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60) self.assertTrue(pad_token_id in out_p['input_ids']) self.assertTrue(0 in out_p['attention_mask']) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0]) self.assertFalse(0 in out_pa['attention_mask'][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1]) self.assertTrue(0 in out_pa['attention_mask'][1]) def _lowerCamelCase ( self : Dict) -> str: """simple docstring""" _UpperCAmelCase = '$$$' _UpperCAmelCase 
= CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A) _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase = tokenizer.bos_token_id _UpperCAmelCase = tokenizer(A) _UpperCAmelCase = tokenizer(A) self.assertEqual(out_s.input_ids[0] , A) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids)) _UpperCAmelCase = tokenizer.decode(out_s.input_ids) _UpperCAmelCase = tokenizer.batch_decode(out_sa.input_ids) self.assertEqual(decode_s.split()[0] , A) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa)) @slow def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono') _UpperCAmelCase = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' _UpperCAmelCase = '\nif len_a > len_b: result = a\nelse: result = b' _UpperCAmelCase = tokenizer.encode(A) _UpperCAmelCase = ['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n'] _UpperCAmelCase = tokenizer.decode(A , truncate_before_pattern=A) self.assertEqual(A , A) def _lowerCamelCase ( self : Tuple) -> List[str]: """simple docstring""" pass
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: '''simple docstring''' return abs(_UpperCAmelCase ) if a == 0 else greatest_common_divisor(b % a , _UpperCAmelCase ) def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: '''simple docstring''' while y: # --> when y=0 then loop will terminate and return x as final GCD. _UpperCAmelCase , _UpperCAmelCase = y, x % y return abs(_UpperCAmelCase ) def A ( ) -> Dict: '''simple docstring''' try: _UpperCAmelCase = input('Enter two integers separated by comma (,): ' ).split(',' ) _UpperCAmelCase = int(nums[0] ) _UpperCAmelCase = int(nums[1] ) print( F"greatest_common_divisor({num_a}, {num_a}) = " F"{greatest_common_divisor(_UpperCAmelCase , _UpperCAmelCase )}" ) print(F"By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_UpperCAmelCase , _UpperCAmelCase )}" ) except (IndexError, UnboundLocalError, ValueError): print('Wrong input' ) if __name__ == "__main__": main()
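# Quick check (illustrative): both implementations above agree, including on negative
# inputs thanks to the abs() calls. The names are those used in main() above.
print(greatest_common_divisor(120, 84))  # 12
print(gcd_by_iterative(-120, 84))        # 12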
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''open-llama''' def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( 'use_memorry_efficient_attention' , A) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ' F"got {self.rope_scaling}") _UpperCAmelCase = self.rope_scaling.get('type' , A) _UpperCAmelCase = self.rope_scaling.get('factor' , A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
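# Minimal sketch (illustrative, not from the original file): the validation above
# accepts a dict with a `type` in {"linear", "dynamic"} and a float `factor` > 1.
# `OpenLlamaConfig` is the presumed public name of the class shown above.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)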
from math import sqrt def A ( _UpperCAmelCase : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A ( _UpperCAmelCase : int = 10_001 ) -> int: '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 1 while count != nth and number < 3: number += 1 if is_prime(_UpperCAmelCase ): count += 1 while count != nth: number += 2 if is_prime(_UpperCAmelCase ): count += 1 return number if __name__ == "__main__": print(f"""{solution() = }""")
def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = credit_card_number _UpperCAmelCase = 0 _UpperCAmelCase = len(_UpperCAmelCase ) - 2 for i in range(_UpperCAmelCase , -1 , -2 ): # double the value of every second digit _UpperCAmelCase = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 _UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(F"{error_message} it has nonnumerical characters." ) return False if not 13 <= len(_UpperCAmelCase ) <= 16: print(F"{error_message} of its length." ) return False if not validate_initial_digits(_UpperCAmelCase ): print(F"{error_message} of its first two digits." ) return False if not luhn_validation(_UpperCAmelCase ): print(F"{error_message} it fails the Luhn check." ) return False print(F"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("4111111111111111") validate_credit_card_number("32323")
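# Worked check (illustrative): for "4111111111111111" the doubled digits are the
# leading 4 (-> 8) and seven of the 1s (-> 2 each), contributing 8 + 14 = 22; the
# remaining eight 1s add 8, so the total is 30, divisible by 10, and the Luhn check
# passes. `luhn_validation` is the name used inside the validator above.
print(luhn_validation("4111111111111111"))  # True, matching the driver above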
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> int: '''simple docstring''' _UpperCAmelCase = s.rsplit(_UpperCAmelCase , _UpperCAmelCase ) return new.join(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> Union[str, Any]: '''simple docstring''' # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Tuple: '''simple docstring''' _UpperCAmelCase = {} _UpperCAmelCase = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: _UpperCAmelCase = key.replace(F"{group_key}." , F"{group_key}.group." ) if "res_path" in key: _UpperCAmelCase = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): _UpperCAmelCase = rreplace(_UpperCAmelCase , '.w' , '.weight' , 1 ) if key.endswith('.b' ): _UpperCAmelCase = rreplace(_UpperCAmelCase , '.b' , '.bias' , 1 ) _UpperCAmelCase = value.float() return upgrade @torch.no_grad() def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[int]=True ) -> Any: '''simple docstring''' from dall_e import Encoder _UpperCAmelCase = Encoder() if os.path.exists(_UpperCAmelCase ): _UpperCAmelCase = torch.load(_UpperCAmelCase ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _UpperCAmelCase = ckpt.state_dict() encoder.load_state_dict(_UpperCAmelCase ) if config_path is not None: _UpperCAmelCase = FlavaImageCodebookConfig.from_pretrained(_UpperCAmelCase ) else: _UpperCAmelCase = FlavaImageCodebookConfig() _UpperCAmelCase = FlavaImageCodebook(_UpperCAmelCase ).eval() _UpperCAmelCase = encoder.state_dict() _UpperCAmelCase = upgrade_state_dict(_UpperCAmelCase ) hf_model.load_state_dict(_UpperCAmelCase ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_UpperCAmelCase ) _UpperCAmelCase = count_parameters(_UpperCAmelCase ) assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(_UpperCAmelCase ) else: return hf_state_dict if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCAmelCase__ = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from functools import reduce UpperCAmelCase__ = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def A ( _UpperCAmelCase : str = N ) -> int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(_UpperCAmelCase ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCAmelCase__ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCAmelCase__ = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split() UpperCAmelCase__ = "|".join(sys.argv[1:]) UpperCAmelCase__ = re.compile(rf"""^({joined_dirs}).*?\.py$""") UpperCAmelCase__ = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
from __future__ import annotations from collections.abc import Callable UpperCAmelCase__ = list[list[float | int]] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = matrix[row][col] _UpperCAmelCase = vector[row][0] _UpperCAmelCase = 0 _UpperCAmelCase = 0 while row < size and col < size: # pivoting _UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _UpperCAmelCase = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _UpperCAmelCase = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = (x_val + 1) ** (size - col - 1) _UpperCAmelCase = y_val _UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int: '''simple docstring''' _UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase = 0 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for poly in polynomials: _UpperCAmelCase = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
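# Usage sketch (illustrative): fitting the first three terms of the degree-10
# generating polynomial and evaluating the resulting quadratic. `interpolate` and
# `question_function` are the names used inside the solution above.
fit = interpolate([question_function(x) for x in range(1, 4)])
print(fit(1), fit(2), fit(3))  # reproduces the sampled terms: 1 683 44287
print(fit(4))                  # the first incorrect term of this fit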
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> str: '''simple docstring''' assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Any: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _UpperCAmelCase = SqlDatasetReader( 'dataset' , 'sqlite:///' + sqlite_path , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read() _check_sql_dataset(_UpperCAmelCase , _UpperCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = features.copy() if features else default_expected_features _UpperCAmelCase = ( Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _UpperCAmelCase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read() _check_sql_dataset(_UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> int: '''simple docstring''' with contextlib.closing(sqlitea.connect(_UpperCAmelCase ) ) as con: _UpperCAmelCase = con.cursor() cur.execute('SELECT * FROM dataset' ) for row in cur: yield row @require_sqlalchemy def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'tmp.sql' ) _UpperCAmelCase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_UpperCAmelCase ).read() SqlDatasetWriter(_UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write() _UpperCAmelCase = iter_sql_file(_UpperCAmelCase ) _UpperCAmelCase = iter_sql_file(_UpperCAmelCase ) for rowa, rowa in zip(_UpperCAmelCase , _UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'tmp.sql' ) _UpperCAmelCase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_UpperCAmelCase ).read() 
SqlDatasetWriter(_UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write() _UpperCAmelCase = iter_sql_file(_UpperCAmelCase ) _UpperCAmelCase = iter_sql_file(_UpperCAmelCase ) for rowa, rowa in zip(_UpperCAmelCase , _UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'tmp.sql' ) _UpperCAmelCase = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_UpperCAmelCase ).read() with pytest.raises(_UpperCAmelCase ): SqlDatasetWriter(_UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
from __future__ import annotations def A ( _UpperCAmelCase : list[int] ) -> bool: '''simple docstring''' return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    '''simple docstring'''
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
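# A hand-checked example for bellman_ford above (hypothetical data, not part of
# the original module): with edges 0->1 (w=2), 0->2 (w=4) and 1->2 (w=1), the
# shortest distances from vertex 0 are [0, 2, 3], since the path 0->1->2 costs 3
# and beats the direct edge 0->2 of cost 4.
example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 0, "dst": 2, "weight": 4},
    {"src": 1, "dst": 2, "weight": 1},
]
assert bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0) == [0, 2, 3]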
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ''

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"""{solution() = }""")
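# A quick hand-verified check of the two helpers above: "IIII" parses to 4,
# which regenerates in minimal form as "IV", a saving of two characters.
assert parse_roman_numerals("IIII") == 4
assert generate_roman_numerals(4) == "IV"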
ROMAN = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"),
    (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]


def roman_to_int(roman: str) -> int:
    '''simple docstring'''
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
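# Round-trip sanity check, verified by hand (the subtractive pair "IV"
# contributes 5 - 1 = 4 on the way in, and divmod peels off "MM" then "XX"
# on the way out). The function names used here are the ones restored above.
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"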
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')

    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0'
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{solution()}""")
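# Hand-checked values for digit_factorial_sum above: 145 is a factorion
# (1! + 4! + 5! = 1 + 24 + 120 = 145), and 169 sits on the length-3 loop
# 169 -> 363601 -> 1454 -> 169 that drives the chain counting in solution().
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363_601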
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
1
UpperCAmelCase__ = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
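# Small cases, verified by hand against the rules encoded in _calculate: with
# 2 days there are 3**2 = 9 attendance strings over {on time, late, absent} and
# only "AA" (absent twice) is disallowed, leaving 8; with 1 day all 3 qualify.
assert solution(2) == 8
assert solution(1) == 3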
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCAmelCase__ = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
def different_signs(num1: int, num2: int) -> bool:
    '''simple docstring'''
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
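# Why the XOR trick above works: in two's complement the sign bit of
# num1 ^ num2 is set exactly when the operands' sign bits differ, so the
# result is negative iff the signs differ. Two illustrative checks:
assert different_signs(-5, 3) is True
assert different_signs(4, 7) is False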
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
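# A plausible invocation of the training script above, shown for orientation
# only. The flags follow the ModelArguments / DataTrainingArguments /
# TrainingArguments fields defined in the script; the script file name,
# checkpoint, and output path are illustrative:
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact-output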
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''open-llama''' def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( 'use_memorry_efficient_attention' , A) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F"got {self.rope_scaling}") _UpperCAmelCase = self.rope_scaling.get('type' , A) _UpperCAmelCase = self.rope_scaling.get('factor' , A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
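# The shape accepted by the _rope_scaling_validation hook called above
# (illustrative values; "linear" is the other permitted type, and `factor`
# must be a float greater than 1):
valid_rope_scaling = {"type": "dynamic", "factor": 2.0}
# {"type": "cubic", "factor": 2.0} or {"type": "linear", "factor": 0.5}
# would both raise ValueError during config construction.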
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() # fmt: off _UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on _UpperCAmelCase = dict(zip(A , range(len(A)))) _UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(A) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(A)) _UpperCAmelCase = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], 'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } _UpperCAmelCase = os.path.join(self.tmpdirname , A) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(A , A) def _lowerCamelCase ( self : Union[str, Any] , **A : Optional[Any]) -> Union[str, Any]: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : Optional[int] , **A : Tuple) -> str: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : Union[str, Any] , **A : Any) -> Optional[Any]: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A) def _lowerCamelCase ( self : Dict) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _lowerCamelCase ( self : Any) -> Tuple: """simple docstring""" _UpperCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] _UpperCAmelCase = [Image.fromarray(np.moveaxis(A , 0 , -1)) for x in image_inputs] return image_inputs def _lowerCamelCase ( self : int) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) processor_slow.save_pretrained(self.tmpdirname) _UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A) _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) processor_fast.save_pretrained(self.tmpdirname) _UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , A) self.assertIsInstance(processor_fast.tokenizer , A) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , A) self.assertIsInstance(processor_fast.image_processor , A) def _lowerCamelCase ( self : List[str]) -> int: """simple docstring""" _UpperCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) _UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') _UpperCAmelCase = self.get_image_processor(do_normalize=A , padding_value=1.0) _UpperCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , A) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , A) def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(A , return_tensors='np') _UpperCAmelCase = processor(images=A , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _lowerCamelCase ( self : str) -> Any: """simple docstring""" _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = processor(text=A) _UpperCAmelCase = tokenizer(A) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=A , images=A) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(A): processor() def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(A) _UpperCAmelCase = tokenizer.batch_decode(A) self.assertListEqual(A , A) def _lowerCamelCase ( self : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = CLIPProcessor(tokenizer=A , image_processor=A) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=A , images=A) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
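# --- Invocation sketch (an addition, not from the original file) ---
# The class above is a plain unittest suite and is normally collected by
# pytest; a hypothetical run, assuming the module is saved as
# test_processor_clip.py:
#
#   python -m pytest test_processor_clip.py -v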
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit `n` can be placed at (row, column) without clashing."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid as nine space-separated rows."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
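# --- Verification sketch (an addition, not from the original file) ---
# Re-checks a completed grid by blanking each cell and asking `is_safe`
# whether its digit could be legally re-inserted; assumes the solver above
# returned a fully filled 9x9 grid.
def is_valid_solution(grid: Matrix) -> bool:
    for row in range(9):
        for column in range(9):
            digit = grid[row][column]
            grid[row][column] = 0  # temporarily blank the cell...
            safe = is_safe(grid, row, column, digit)
            grid[row][column] = digit  # ...and restore it
            if digit == 0 or not safe:
                return False
    return True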
UpperCAmelCase__ = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
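# --- Usage sketch (an addition, not from the original file) ---
# Scores one prediction against one reference through the `datasets` metric
# API (deprecated upstream in favor of the `evaluate` library); it mirrors the
# example embedded in the module docstring above.
if __name__ == "__main__":
    meteor = datasets.load_metric("meteor")
    results = meteor.compute(
        predictions=["It is a guide to action which ensures that the military always obeys the commands of the party"],
        references=["It is a guide to action that ensures that the military will forever heed Party commands"],
    )
    print(round(results["meteor"], 4))  # ~0.6944, per the docstring example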
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase__ = pytest.mark.integration UpperCAmelCase__ = {"comet"} UpperCAmelCase__ = importlib.util.find_spec("fairseq") is not None UpperCAmelCase__ = {"code_eval"} UpperCAmelCase__ = os.name == "nt" UpperCAmelCase__ = {"bertscore", "frugalscore", "perplexity"} UpperCAmelCase__ = importlib.util.find_spec("transformers") is not None def A ( _UpperCAmelCase : Any ) -> Dict: '''simple docstring''' @wraps(_UpperCAmelCase ) def wrapper(self : Tuple , _UpperCAmelCase : Any ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _UpperCAmelCase ) return wrapper def A ( _UpperCAmelCase : str ) -> Any: '''simple docstring''' @wraps(_UpperCAmelCase ) def wrapper(self : List[str] , _UpperCAmelCase : Tuple ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _UpperCAmelCase ) return wrapper def A ( _UpperCAmelCase : Dict ) -> List[str]: '''simple docstring''' @wraps(_UpperCAmelCase ) def wrapper(self : List[Any] , _UpperCAmelCase : Any ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _UpperCAmelCase ) return wrapper def A ( ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( A , A , A ) @local class __lowerCAmelCase ( parameterized.TestCase ): UpperCamelCase = {} UpperCamelCase = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning') @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning') def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> str: """simple docstring""" _UpperCAmelCase = '[...]' _UpperCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , A)).module_path) _UpperCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=A) # check parameters _UpperCAmelCase = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(A , metric_module.__name__): with self.use_local_metrics(): try: _UpperCAmelCase = doctest.testmod(A , verbose=A , raise_on_error=A) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @slow def _lowerCamelCase ( self : Optional[Any] , A : Union[str, Any]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = '[...]' _UpperCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , A)).module_path) # run doctest with self.use_local_metrics(): _UpperCAmelCase = doctest.testmod(A , verbose=A , raise_on_error=A) self.assertEqual(results.failed , 0) 
self.assertGreater(results.attempted , 1) @contextmanager def _lowerCamelCase ( self : List[str] , A : Optional[Any] , A : Any) -> Dict: """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](A): yield else: yield @contextmanager def _lowerCamelCase ( self : Dict) -> Any: """simple docstring""" def load_local_metric(A : Tuple , *A : int , **A : List[Any]): return load_metric(os.path.join('metrics' , A) , *A , **A) with patch('datasets.load_metric') as mock_load_metric: _UpperCAmelCase = load_local_metric yield @classmethod def _lowerCamelCase ( cls : Optional[int] , A : int) -> Tuple: """simple docstring""" def wrapper(A : Union[str, Any]): _UpperCAmelCase = contextmanager(A) _UpperCAmelCase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[str]: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __lowerCAmelCase ( A ): def _lowerCamelCase ( self : Any , A : List[str]) -> Dict: """simple docstring""" assert len(input_dict['input_ids']) == 2 return np.array([1.0_3, 1.0_4]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _UpperCAmelCase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def A ( _UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' import torch def bert_cos_score_idf(_UpperCAmelCase : str , _UpperCAmelCase : int , *_UpperCAmelCase : int , **_UpperCAmelCase : int ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _UpperCAmelCase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def A ( _UpperCAmelCase : Tuple ) -> Optional[Any]: '''simple docstring''' def load_from_checkpoint(_UpperCAmelCase : Optional[Any] ): class __lowerCAmelCase : def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any] , *A : Union[str, Any] , **A : Union[str, Any]) -> List[Any]: """simple docstring""" assert len(A) == 2 _UpperCAmelCase = [0.1_9, 0.9_2] return scores, sum(A) / len(A) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _UpperCAmelCase = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _UpperCAmelCase = load_from_checkpoint yield def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = load_metric(os.path.join('metrics' , 'seqeval' ) ) _UpperCAmelCase = 'ERROR' _UpperCAmelCase = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ): metric.compute(predictions=[] , references=[] , scheme=_UpperCAmelCase )
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
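# --- Invocation sketch (an addition, not from the original file) ---
# The converter is driven from the command line via the argparse block above.
# A hypothetical run, converting the "tiny" checkpoint (fetched on demand from
# the _MODELS table) into a Transformers-format dump; the script filename is
# assumed:
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny-hf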
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word with the Baconian substitution table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher string produced by encode()."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
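# --- Round-trip sketch (an addition, not from the original file) ---
# Each letter expands to five A/B symbols and inverts cleanly; uses only the
# encode/decode pair defined above.
if __name__ == "__main__":
    secret = encode("hello world")
    print(secret)  # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    print(decode(secret))  # hello world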
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
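# --- Usage sketch (an addition, not from the original file) ---
# "audiofolder" is the loader name this builder is registered under in
# `datasets`; the directory layout below (one sub-folder per class, audio
# files inside) is hypothetical.
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_clips")
#   print(ds["train"][0]["audio"], ds["train"][0]["label"])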
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Read images and YOLO annotations, flip them, and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            annos_list.append(f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}")
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format annotation boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """Flip every image and mirror the matching bounding-box centres."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
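# --- Sanity-check sketch (an addition, not from the original file) ---
# Demonstrates the YOLO-style coordinate math used above: a horizontal flip
# mirrors the normalized x-centre (x -> 1 - x) and leaves the box size alone;
# a vertical flip does the same to the y-centre.
if __name__ == "__main__":
    label, x_c, y_c, w, h = 0, 0.25, 0.40, 0.10, 0.20
    print([label, 1 - x_c, y_c, w, h])  # horizontal flip -> [0, 0.75, 0.4, 0.1, 0.2]
    print([label, x_c, 1 - y_c, w, h])  # vertical flip   -> [0, 0.25, 0.6, 0.1, 0.2]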
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Evaluation language. 
Also train language if `train_language` is set to None.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def A ( ) -> Tuple: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_xnli' , _UpperCAmelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' 
) elif last_checkpoint is not None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: _UpperCAmelCase = load_dataset( 'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: _UpperCAmelCase = load_dataset( 'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = train_dataset.features['label'].names if training_args.do_eval: _UpperCAmelCase = load_dataset( 'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = eval_dataset.features['label'].names if training_args.do_predict: _UpperCAmelCase = load_dataset( 'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = predict_dataset.features['label'].names # Labels _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , idalabel={str(_UpperCAmelCase ): label for i, label in enumerate(_UpperCAmelCase )} , labelaid={label: i for i, label in enumerate(_UpperCAmelCase )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False def preprocess_function(_UpperCAmelCase : List[str] ): # Tokenize the texts return tokenizer( examples['premise'] , examples['hypothesis'] , padding=_UpperCAmelCase , max_length=data_args.max_seq_length , truncation=_UpperCAmelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = min(len(_UpperCAmelCase ) , data_args.max_train_samples ) _UpperCAmelCase = 
train_dataset.select(range(_UpperCAmelCase ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): _UpperCAmelCase = train_dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , ) # Log a few random samples from the training set: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = min(len(_UpperCAmelCase ) , data_args.max_eval_samples ) _UpperCAmelCase = eval_dataset.select(range(_UpperCAmelCase ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): _UpperCAmelCase = eval_dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , ) if training_args.do_predict: if data_args.max_predict_samples is not None: _UpperCAmelCase = min(len(_UpperCAmelCase ) , data_args.max_predict_samples ) _UpperCAmelCase = predict_dataset.select(range(_UpperCAmelCase ) ) with training_args.main_process_first(desc='prediction dataset map pre-processing' ): _UpperCAmelCase = predict_dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , ) # Get the metric function _UpperCAmelCase = evaluate.load('xnli' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return metric.compute(predictions=_UpperCAmelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) # Prediction if training_args.do_predict: logger.info('*** Predict ***' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ) _UpperCAmelCase = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('predict' , _UpperCAmelCase ) trainer.save_metrics('predict' , _UpperCAmelCase ) _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predictions.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) if __name__ == "__main__": main()
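# --- Invocation sketch (an addition, not from the original file) ---
# A hypothetical fine-tuning run of this script on XNLI, training on English
# and evaluating on German; each flag maps onto the dataclass fields /
# TrainingArguments parsed in main():
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli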
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input: str, model, tokenizer, topk: int = 5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the message, drop non-letters, and pad doubled letters with X."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
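# --- Round-trip sketch (an addition, not from the original file) ---
# Encrypts and decrypts with the same key; the trailing X comes from
# prepare_input() padding the message to an even length, which is expected
# Playfair behavior.
if __name__ == "__main__":
    key = "monarchy"
    secret = encode("hide the gold", key)
    print(secret)
    print(decode(secret, key))  # HIDETHEGOLDX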
import math
import unittest


def is_prime(number: int) -> bool:
    """Checks whether `number` is prime, in O(sqrt(n)) trial divisions."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
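# When primality of many small numbers is needed, a sieve beats repeated
# trial division; a minimal Sieve of Eratosthenes sketch for comparison.
def prime_sieve(limit: int) -> list[bool]:
    is_prime_flags = [False, False] + [True] * (limit - 1)  # index i <=> i is prime
    for i in range(2, int(limit**0.5) + 1):
        if is_prime_flags[i]:
            for multiple in range(i * i, limit + 1, i):
                is_prime_flags[multiple] = False
    return is_prime_flags


flags = prime_sieve(100)
assert [i for i in range(30) if flags[i]] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]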
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
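# The metric above is a thin wrapper around nltk; computing GLEU directly
# makes the aggregation explicit (the token lists reuse the docstring example).
from nltk.translate import gleu_score

hypothesis = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
reference = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]

print(gleu_score.sentence_gleu([reference], hypothesis))    # sentence-level, default 1- to 4-grams
print(gleu_score.corpus_gleu([[reference]], [hypothesis]))  # corpus-level, what the metric's compute() returns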
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCAmelCase__ = "Create a default config file for Accelerate with only a few flags set." def A ( _UpperCAmelCase : List[str]="no" , _UpperCAmelCase : str = default_json_config_file , _UpperCAmelCase : bool = False ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = Path(_UpperCAmelCase ) path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase ) if path.exists(): print( F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." ) return False _UpperCAmelCase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" ) _UpperCAmelCase = { 'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision, } if torch.cuda.is_available(): _UpperCAmelCase = torch.cuda.device_count() _UpperCAmelCase = num_gpus _UpperCAmelCase = False if num_gpus > 1: _UpperCAmelCase = 'MULTI_GPU' else: _UpperCAmelCase = 'NO' elif is_xpu_available() and use_xpu: _UpperCAmelCase = torch.xpu.device_count() _UpperCAmelCase = num_xpus _UpperCAmelCase = False if num_xpus > 1: _UpperCAmelCase = 'MULTI_XPU' else: _UpperCAmelCase = 'NO' elif is_npu_available(): _UpperCAmelCase = torch.npu.device_count() _UpperCAmelCase = num_npus _UpperCAmelCase = False if num_npus > 1: _UpperCAmelCase = 'MULTI_NPU' else: _UpperCAmelCase = 'NO' else: _UpperCAmelCase = 0 _UpperCAmelCase = True _UpperCAmelCase = 1 _UpperCAmelCase = 'NO' _UpperCAmelCase = ClusterConfig(**_UpperCAmelCase ) config.to_json_file(_UpperCAmelCase ) return path def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Dict: '''simple docstring''' _UpperCAmelCase = parser.add_parser('default' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase ) parser.add_argument( '--config_file' , default=_UpperCAmelCase , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , dest='save_location' , ) parser.add_argument( '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_UpperCAmelCase , help='Whether or not to use mixed precision training. ' 'Choose between FP16 and BF16 (bfloat16) training. ' 'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' 
, default='no' , ) parser.set_defaults(func=_UpperCAmelCase ) return parser def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F"accelerate configuration saved at {config_file}" )
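# A minimal sketch of driving the helper above through accelerate's public
# API (assuming the installed version re-exports it as
# accelerate.utils.write_basic_config, as recent releases do).
from accelerate.utils import write_basic_config

# Writes a default config with num_processes/distributed_type inferred from
# the visible devices, mirroring `accelerate config default`.
write_basic_config(mixed_precision="fp16")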
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
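# The init above relies on transformers' _LazyModule to defer heavy imports.
# A minimal stand-alone sketch of the same idea via module-level __getattr__
# (PEP 562); the attribute and submodule names below are illustrative only.
import importlib

_LAZY_ATTRS = {"MT5Model": ".modeling_mt5"}  # attribute -> submodule (hypothetical mapping)


def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], package=__package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")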
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,  # one position per pixel
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
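# Hedged usage sketch: the attribute_map above lets generic code read the
# GPT-2-style field names through the common PretrainedConfig aliases.
from transformers import ImageGPTConfig

config = ImageGPTConfig()
assert config.hidden_size == config.n_embd          # aliased by attribute_map
assert config.num_hidden_layers == config.n_layer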
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg is accepted for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
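# Exercising the validation above (a sketch; assumes the OpenLlamaConfig
# class defined in this module is importable).
OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # `rope_scaling`'s name field must be one of ['linear', 'dynamic'], ...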
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() UpperCAmelCase__ = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model UpperCAmelCase__ = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names UpperCAmelCase__ = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCAmelCase__ = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: UpperCAmelCase__ = "allenai" def A ( _UpperCAmelCase : str ) -> Union[str, Any]: '''simple docstring''' # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCAmelCase = dict((re.sub(R'@@$' , '' , _UpperCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCAmelCase ), v) for k, v in d.items() ) _UpperCAmelCase = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F"{k}</w>"] _UpperCAmelCase = d[k] # restore return da def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> str: '''simple docstring''' # prep assert os.path.exists(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) print(F"Writing results to {pytorch_dump_folder_path}" ) # handle various types of models _UpperCAmelCase = basename(_UpperCAmelCase ) _UpperCAmelCase = dirname(_UpperCAmelCase ) _UpperCAmelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel _UpperCAmelCase = cls.hub_models() _UpperCAmelCase = {'bpe': 'fastbpe', 'tokenizer': 'moses'} _UpperCAmelCase = '.' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F"using checkpoint {checkpoint_file}" ) _UpperCAmelCase = hub_utils.from_pretrained( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , archive_map=_UpperCAmelCase , **_UpperCAmelCase ) _UpperCAmelCase = vars(chkpt['args']['model'] ) _UpperCAmelCase = args['source_lang'] _UpperCAmelCase = args['target_lang'] _UpperCAmelCase = dirname(_UpperCAmelCase ) _UpperCAmelCase = basename(_UpperCAmelCase ) # dicts _UpperCAmelCase = os.path.join(_UpperCAmelCase , F"dict.{src_lang}.txt" ) _UpperCAmelCase = os.path.join(_UpperCAmelCase , F"dict.{tgt_lang}.txt" ) _UpperCAmelCase = Dictionary.load(_UpperCAmelCase ) _UpperCAmelCase = rewrite_dict_keys(src_dict.indices ) _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'vocab-src.json' ) print(F"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab _UpperCAmelCase = True for k in src_vocab.keys(): if not k.islower(): _UpperCAmelCase = False break _UpperCAmelCase = Dictionary.load(_UpperCAmelCase ) _UpperCAmelCase = rewrite_dict_keys(tgt_dict.indices ) _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'vocab-tgt.json' ) print(F"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # merges_file (bpecodes) _UpperCAmelCase = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['merges_file'] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" _UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ): break with open(_UpperCAmelCase , encoding='utf-8' ) as fin: _UpperCAmelCase = fin.read() _UpperCAmelCase = re.sub(R' \d+$' , '' , _UpperCAmelCase , 0 , re.M ) # remove frequency number print(F"Generating {merges_file}" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as fout: fout.write(_UpperCAmelCase ) # model config _UpperCAmelCase = os.path.join(_UpperCAmelCase , 'config.json' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", F"need to extend tokenizer to support bpe={args['tokenizer']}" _UpperCAmelCase = { 'architectures': ['FSMTForConditionalGeneration'], 'model_type': 'fsmt', 'activation_dropout': args['activation_dropout'], 'activation_function': 'relu', 'attention_dropout': args['attention_dropout'], 'd_model': args['decoder_embed_dim'], 'dropout': args['dropout'], 'init_std': 0.02, 'max_position_embeddings': args['max_source_positions'], 'num_hidden_layers': args['encoder_layers'], 'src_vocab_size': src_vocab_size, 'tgt_vocab_size': tgt_vocab_size, 'langs': [src_lang, tgt_lang], 'encoder_attention_heads': args['encoder_attention_heads'], 'encoder_ffn_dim': args['encoder_ffn_embed_dim'], 'encoder_layerdrop': args['encoder_layerdrop'], 'encoder_layers': args['encoder_layers'], 'decoder_attention_heads': 
args['decoder_attention_heads'], 'decoder_ffn_dim': args['decoder_ffn_embed_dim'], 'decoder_layerdrop': args['decoder_layerdrop'], 'decoder_layers': args['decoder_layers'], 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'is_encoder_decoder': True, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_all_embeddings'], } # good hparam defaults to start with _UpperCAmelCase = 5 _UpperCAmelCase = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: _UpperCAmelCase = best_score_hparams[model_dir]['length_penalty'] else: _UpperCAmelCase = 1.0 print(F"Generating {fsmt_model_config_file}" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # tokenizer config _UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = { 'langs': [src_lang, tgt_lang], 'model_max_length': 1_024, 'do_lower_case': do_lower_case, } print(F"Generating {fsmt_tokenizer_config_file}" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # model _UpperCAmelCase = chkpt['models'][0] _UpperCAmelCase = model.state_dict() # rename keys to start with 'model.' _UpperCAmelCase = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys _UpperCAmelCase = [ 'model.model', 'model.encoder.version', 'model.decoder.version', 'model.encoder_embed_tokens.weight', 'model.decoder_embed_tokens.weight', 'model.encoder.embed_positions._float_tensor', 'model.decoder.embed_positions._float_tensor', ] for k in ignore_keys: model_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = FSMTConfig.from_pretrained(_UpperCAmelCase ) _UpperCAmelCase = FSMTForConditionalGeneration(_UpperCAmelCase ) # check that it loads ok model_new.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) # save _UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) print(F"Generating {pytorch_weights_dump_path}" ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) print('Conversion is done!' ) print('\nLast step is to upload the files to s3' ) print(F"cd {data_root}" ) print(F"transformers-cli upload {model_dir}" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase__ = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
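# Sanity-checking a converted checkpoint (a sketch; the dump path is a
# placeholder, while FSMTTokenizer and FSMTForConditionalGeneration are the
# real transformers classes this converter targets).
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

dump_dir = "path/to/pytorch_dump_folder"  # placeholder
tokenizer = FSMTTokenizer.from_pretrained(dump_dir)
model = FSMTForConditionalGeneration.from_pretrained(dump_dir)

batch = tokenizer("Maschinelles Lernen ist großartig!", return_tensors="pt")
generated = model.generate(**batch, num_beams=5)
print(tokenizer.decode(generated[0], skip_special_tokens=True))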
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform Luhn algorithm validation for a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
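# Companion sketch: deriving the Luhn check digit for a partial number
# (luhn_check_digit is my helper name, not part of the original module).
def luhn_check_digit(partial_number: str) -> int:
    for check in range(10):
        if luhn_validation(partial_number + str(check)):
            return check
    raise AssertionError("exactly one check digit always validates")


assert luhn_check_digit("411111111111111") == 1  # completes the valid test number above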
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCAmelCase : def __init__( self : Dict , A : Optional[Any] , A : List[str]=99 , A : int=13 , A : str=7 , A : Optional[Any]=9 , A : List[str]=True , A : List[str]=True , A : List[str]=False , A : Tuple=32 , A : Optional[int]=5 , A : Any=4 , A : Any=37 , A : Tuple=8 , A : Optional[int]=0.1 , A : Union[str, Any]=0.0_0_2 , A : Dict=1 , A : int=0 , A : Optional[int]=0 , A : Any=None , A : Tuple=None , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = encoder_seq_length _UpperCAmelCase = decoder_seq_length # For common tests _UpperCAmelCase = self.decoder_seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = d_ff _UpperCAmelCase = relative_attention_num_buckets _UpperCAmelCase = dropout_rate _UpperCAmelCase = initializer_factor _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = decoder_start_token_id _UpperCAmelCase = None _UpperCAmelCase = decoder_layers def _lowerCamelCase ( self : int) -> Optional[int]: """simple docstring""" return TaConfig.from_pretrained('google/umt5-base') def _lowerCamelCase ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : int , A : int=None , A : int=None , A : Any=None , A : Any=None , A : str=None , ) -> str: """simple docstring""" if attention_mask is None: _UpperCAmelCase = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: _UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: _UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=A) if decoder_head_mask is None: _UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=A) if cross_attn_head_mask is None: _UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=A) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _lowerCamelCase ( self : int) -> Any: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size) _UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by 
num_pad_tokens in past input _UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1) _UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1) _UpperCAmelCase = self.get_config() _UpperCAmelCase = config.num_attention_heads _UpperCAmelCase = self.prepare_inputs_dict(A , A , A) return config, input_dict def _lowerCamelCase ( self : Tuple) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def _lowerCamelCase ( self : Optional[int]) -> int: """simple docstring""" return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _lowerCamelCase ( self : Dict) -> Any: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _lowerCamelCase ( self : str , A : Dict , A : str , A : Dict , A : int , A : Any , A : List[str] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = UMTaModel(config=A) model.to(A) model.eval() _UpperCAmelCase = model( input_ids=A , decoder_input_ids=A , attention_mask=A , decoder_attention_mask=A , ) _UpperCAmelCase = model(input_ids=A , decoder_input_ids=A) _UpperCAmelCase = result.last_hidden_state _UpperCAmelCase = result.past_key_values _UpperCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(A) , config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]) , 4) def _lowerCamelCase ( self : Any , A : Optional[Any] , A : int , A : Dict , A : List[Any] , A : Any , A : Optional[Any] , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = UMTaModel(config=A).get_decoder().to(A).eval() # first forward pass _UpperCAmelCase = model(A , use_cache=A) _UpperCAmelCase = model(A) _UpperCAmelCase = model(A , use_cache=A) self.parent.assertTrue(len(A) == len(A)) self.parent.assertTrue(len(A) == len(A) + 1) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1) _UpperCAmelCase = model(A)['last_hidden_state'] 
_UpperCAmelCase = model(A , past_key_values=A)['last_hidden_state'] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1]).item() _UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1E-3)) def _lowerCamelCase ( self : str , A : List[Any] , A : List[Any] , ) -> str: """simple docstring""" _UpperCAmelCase = UMTaModel(config=A).to(A).half().eval() _UpperCAmelCase = model(**A)['last_hidden_state'] self.parent.assertFalse(torch.isnan(A).any().item()) @require_torch class __lowerCAmelCase ( A , A , A , unittest.TestCase ): UpperCamelCase = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) UpperCamelCase = (UMTaForConditionalGeneration,) if is_torch_available() else () UpperCamelCase = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False UpperCamelCase = True UpperCamelCase = True # The small UMT5 model needs higher percentages for CPU/MP tests UpperCamelCase = [0.8, 0.9] def _lowerCamelCase ( self : List[str]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = UMTaModelTester(self) @unittest.skip('Test has a segmentation fault on torch 1.8.0') def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = UMTaModel(config_and_inputs[0]).to(A) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=A , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision') def _lowerCamelCase ( self : Dict) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*A) def _lowerCamelCase ( self : Tuple) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = config_and_inputs[0] _UpperCAmelCase = UMTaForConditionalGeneration(A).eval() model.to(A) _UpperCAmelCase = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=A), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=A), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=A), } for attn_name, (name, mask) in zip(A , head_masking.items()): _UpperCAmelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=A) _UpperCAmelCase = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=A , return_dict_in_generate=A , **A , ) # We check the state of decoder_attentions and cross_attentions just from the last step 
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.') def _lowerCamelCase ( self : List[Any]) -> Tuple: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged') def _lowerCamelCase ( self : Any) -> Optional[int]: """simple docstring""" _UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=A).to(A) _UpperCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=A , legacy=A) _UpperCAmelCase = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] _UpperCAmelCase = tokenizer(A , return_tensors='pt' , padding=A).input_ids # fmt: off _UpperCAmelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ]) # fmt: on torch.testing.assert_allclose(A , A) _UpperCAmelCase = model.generate(input_ids.to(A)) _UpperCAmelCase = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] _UpperCAmelCase = tokenizer.batch_decode(A) self.assertEqual(A , A)
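# A hedged smoke test outside the tester harness above: one forward pass of a
# tiny randomly initialised UMT5 (config sizes are arbitrary; UMT5Config and
# UMT5Model are the public transformers names behind the obfuscated ones).
import torch
from transformers import UMT5Config, UMT5Model

tiny_config = UMT5Config(vocab_size=166, d_model=32, d_kv=8, d_ff=37, num_layers=2, num_heads=4)
tiny_model = UMT5Model(tiny_config).eval()

input_ids = torch.randint(0, tiny_config.vocab_size, (2, 7))
decoder_input_ids = torch.randint(0, tiny_config.vocab_size, (2, 9))
with torch.no_grad():
    outputs = tiny_model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 9, 32])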
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
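# An equivalent, more direct formulation using math.prod over each 13-digit
# window; the assert pins the well-known Project Euler #8 answer.
import math


def solution_prod(n: str = N, span: int = 13) -> int:
    return max(math.prod(int(digit) for digit in n[i : i + span]) for i in range(len(n) - span + 1))


assert solution_prod() == 23514624000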
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
from __future__ import annotations from collections.abc import Callable UpperCAmelCase__ = list[list[float | int]] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = matrix[row][col] _UpperCAmelCase = vector[row][0] _UpperCAmelCase = 0 _UpperCAmelCase = 0 while row < size and col < size: # pivoting _UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _UpperCAmelCase = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _UpperCAmelCase = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = (x_val + 1) ** (size - col - 1) _UpperCAmelCase = y_val _UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int: '''simple docstring''' _UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase = 0 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for poly in polynomials: _UpperCAmelCase = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
339
1
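The interpolation row above builds a Vandermonde system and solves it by Gaussian elimination (Project Euler 101). The sketch below restates that idea with de-obfuscated, illustrative names (solve_system, fit_poly, poly_at are not taken from the source) and checks the classic OP(2, n) = 7n - 6 case for u(n) = n^3, whose first incorrect term is 15.

def solve_system(a, b):
    # Solve a * x = b for a small dense system with Gauss-Jordan elimination.
    n = len(a)
    m = [row[:] + [b[i]] for i, row in enumerate(a)]  # augmented matrix
    for col in range(n):
        pivot = max(range(col, n), key=lambda r: abs(m[r][col]))
        m[col], m[pivot] = m[pivot], m[col]
        if m[col][col] == 0:
            continue  # singular column; acceptable for this demo
        for r in range(n):
            if r != col:
                ratio = m[r][col] / m[col][col]
                m[r] = [x - ratio * y for x, y in zip(m[r], m[col])]
    return [m[i][n] / m[i][i] for i in range(n)]

def fit_poly(ys):
    # Coefficients (highest power first) of the degree len(ys)-1 polynomial
    # passing through (1, ys[0]), (2, ys[1]), ...
    k = len(ys)
    a = [[(x + 1) ** (k - c - 1) for c in range(k)] for x in range(k)]
    return solve_system(a, list(ys))

def poly_at(coeffs, x):
    return round(sum(c * x ** (len(coeffs) - i - 1) for i, c in enumerate(coeffs)))

coeffs = fit_poly([n**3 for n in (1, 2)])  # OP(2, n) = 7n - 6
assert poly_at(coeffs, 3) == 15            # its first incorrect term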
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline UpperCAmelCase__ = { "n_samples": 64, "horizon": 32, "num_inference_steps": 20, "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network "scale_grad_by_std": True, "scale": 0.1, "eta": 0.0, "t_grad_cutoff": 2, "device": "cpu", } if __name__ == "__main__": UpperCAmelCase__ = "hopper-medium-v2" UpperCAmelCase__ = gym.make(env_name) UpperCAmelCase__ = ValueGuidedRLPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", env=env, ) env.seed(0) UpperCAmelCase__ = env.reset() UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 UpperCAmelCase__ = 1000 UpperCAmelCase__ = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy UpperCAmelCase__ = pipeline(obs, planning_horizon=32) # execute action in environment UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = env.step(denorm_actions) UpperCAmelCase__ = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:""" f""" {total_score}""" ) # save observations for rendering rollout.append(next_observation.copy()) UpperCAmelCase__ = next_observation except KeyboardInterrupt: pass print(f"""Total reward: {total_reward}""")
339
from __future__ import annotations def A ( _UpperCAmelCase : list[int] ) -> bool: '''simple docstring''' # True only when every element of the list is distinct return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
339
1
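The short helper in the row above tests uniqueness with len(set(xs)) == len(xs), which always scans the whole list. The variant below (has_duplicate is an illustrative name, not from the source) exits at the first repeat, which can be faster on long inputs.

def has_duplicate(values):
    seen = set()
    for v in values:
        if v in seen:
            return True  # stop at the first repeated element
        seen.add(v)
    return False

assert has_duplicate([1, 2, 3, 2]) is True
assert has_duplicate([1, 2, 3, 4]) is False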
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> Any: '''simple docstring''' # load base model _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors _UpperCAmelCase = load_file(_UpperCAmelCase ) _UpperCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: _UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) _UpperCAmelCase = pipeline.text_encoder else: _UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) _UpperCAmelCase = pipeline.unet # find the target layer _UpperCAmelCase = layer_infos.pop(0 ) while len(_UpperCAmelCase ) > -1: try: _UpperCAmelCase = curr_layer.__getattr__(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: _UpperCAmelCase = layer_infos.pop(0 ) elif len(_UpperCAmelCase ) == 0: break except Exception: if len(_UpperCAmelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: _UpperCAmelCase = layer_infos.pop(0 ) _UpperCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(_UpperCAmelCase ) else: pair_keys.append(_UpperCAmelCase ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: _UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) _UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase , _UpperCAmelCase ).unsqueeze(2 ).unsqueeze(3 ) else: _UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) _UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase , _UpperCAmelCase ) # update visited list for item in pair_keys: visited.append(_UpperCAmelCase ) return pipeline if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)") UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = args.base_model_path UpperCAmelCase__ = args.checkpoint_path UpperCAmelCase__ = args.dump_path UpperCAmelCase__ = args.lora_prefix_unet UpperCAmelCase__ = args.lora_prefix_text_encoder UpperCAmelCase__ = args.alpha UpperCAmelCase__ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) UpperCAmelCase__ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
339
import os UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000} def A ( _UpperCAmelCase : str ) -> int: '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(_UpperCAmelCase ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def A ( _UpperCAmelCase : int ) -> str: '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1_000 numerals += m_count * "M" num %= 1_000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int: '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase ) _UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase ) savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase ) return savings if __name__ == "__main__": print(f"""{solution() = }""")
339
1
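The Roman-numeral parser in the row above relies on the subtractive rule: a symbol's value is subtracted when the following symbol is larger (the I in IV), otherwise added. A compact, de-obfuscated sketch of that rule (parse_roman is an illustrative name; SYMBOLS matches the source's table):

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals: str) -> int:
    total = 0
    for i, ch in enumerate(numerals):
        value = SYMBOLS[ch]
        # subtractive notation: a smaller symbol placed before a larger one
        if i + 1 < len(numerals) and SYMBOLS[numerals[i + 1]] > value:
            total -= value
        else:
            total += value
    return total

assert parse_roman("XIX") == 19
assert parse_roman("MCMXC") == 1990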
import sys UpperCAmelCase__ = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def A ( _UpperCAmelCase : str = N ) -> int: '''simple docstring''' _UpperCAmelCase = -sys.maxsize - 1 for i in range(len(_UpperCAmelCase ) - 12 ): _UpperCAmelCase = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _UpperCAmelCase = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
339
import requests from bsa import BeautifulSoup def A ( _UpperCAmelCase : str , _UpperCAmelCase : dict ) -> str: '''simple docstring''' _UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase , params=_UpperCAmelCase ).content , 'html.parser' ) _UpperCAmelCase = soup.find('div' , attrs={'class': 'gs_ri'} ) _UpperCAmelCase = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' ) return anchors[2].get_text() if __name__ == "__main__": UpperCAmelCase__ = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
1
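The Project Euler 8 row above brute-forces the product of 13 adjacent digits. The same search reads more clearly as a sliding window; the function below is a sketch with an illustrative name, checked on the first nine digits of the source's 1000-digit constant.

from math import prod

def largest_adjacent_product(digits: str, window: int) -> int:
    # Slide a window across the digit string and track the best product.
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )

assert largest_adjacent_product("731671765", 3) == 210  # the window "765"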
from __future__ import annotations from typing import TypedDict class __lowerCAmelCase ( A ): UpperCamelCase = 42 UpperCamelCase = 42 def A ( _UpperCAmelCase : str ) -> list[str]: '''simple docstring''' if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('The parameter s type must be str.' ) return [s[i:] + s[:i] for i in range(len(_UpperCAmelCase ) )] def A ( _UpperCAmelCase : str ) -> BWTTransformDict: '''simple docstring''' if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('The parameter s type must be str.' ) if not s: raise ValueError('The parameter s must not be empty.' ) _UpperCAmelCase = all_rotations(_UpperCAmelCase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation _UpperCAmelCase = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(_UpperCAmelCase ), } return response def A ( _UpperCAmelCase : str , _UpperCAmelCase : int ) -> str: '''simple docstring''' if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('The parameter bwt_string type must be str.' ) if not bwt_string: raise ValueError('The parameter bwt_string must not be empty.' ) try: _UpperCAmelCase = int(_UpperCAmelCase ) except ValueError: raise TypeError( 'The parameter idx_original_string type must be int or' ' castable to int.' ) if idx_original_string < 0: raise ValueError('The parameter idx_original_string must not be lower than 0.' ) if idx_original_string >= len(_UpperCAmelCase ): raise ValueError( 'The parameter idx_original_string must be lower than' ' len(bwt_string).' ) _UpperCAmelCase = [''] * len(_UpperCAmelCase ) for _ in range(len(_UpperCAmelCase ) ): for i in range(len(_UpperCAmelCase ) ): _UpperCAmelCase = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": UpperCAmelCase__ = "Provide a string for which I will generate its BWT transform: " UpperCAmelCase__ = input(entry_msg).strip() UpperCAmelCase__ = bwt_transform(s) print( f"""Burrows Wheeler transform for string '{s}' results """ f"""in '{result["bwt_string"]}'""" ) UpperCAmelCase__ = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """ f"""we get original string '{original_string}'""" )
339
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
1
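The Burrows-Wheeler row above sorts all rotations and records the last column plus the rank of the original string. A minimal round-trip sketch (bwt and inverse_bwt are illustrative names; the source calls them bwt_transform and reverse_bwt):

from __future__ import annotations

def bwt(s: str) -> tuple[str, int]:
    # Sort all rotations; keep the last column and the original's rank.
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(r[-1] for r in rotations), rotations.index(s)

def inverse_bwt(last_column: str, idx: int) -> str:
    # Repeatedly prepend the last column and re-sort to rebuild the table.
    table = [""] * len(last_column)
    for _ in range(len(last_column)):
        table = sorted(last_column[i] + table[i] for i in range(len(last_column)))
    return table[idx]

encoded, idx = bwt("banana")
assert encoded == "nnbaaa" and idx == 3
assert inverse_bwt(encoded, idx) == "banana"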
from sklearn.metrics import mean_squared_error import datasets UpperCAmelCase__ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" UpperCAmelCase__ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" UpperCAmelCase__ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : Any) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[ 'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html' ] , ) def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('float')), "references": datasets.Sequence(datasets.Value('float')), } else: return { "predictions": datasets.Value('float'), "references": datasets.Value('float'), } def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str] , A : str=None , A : List[str]="uniform_average" , A : Tuple=True) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = mean_squared_error( A , A , sample_weight=A , multioutput=A , squared=A) return {"mse": mse}
339
UpperCAmelCase__ = {} def A ( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: '''simple docstring''' # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _UpperCAmelCase = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _UpperCAmelCase = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , 0 ) _UpperCAmelCase = state_late + state_absent + state_ontime _UpperCAmelCase = prizestrings return prizestrings def A ( _UpperCAmelCase : int = 30 ) -> int: '''simple docstring''' return _calculate(_UpperCAmelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
339
1
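The prize-strings row above memoizes a three-way recursion over (days, absent, late) with a hand-rolled dict cache. The same recursion with functools.lru_cache (a sketch; prize_strings is an illustrative name) reproduces the count of 43 valid 4-day strings quoted in Project Euler 191 (the source swaps the roles of "late" and "absent" relative to the problem statement, which leaves the count unchanged by symmetry):

from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:  # a rule was broken: no prize from here
        return 0
    if days == 0:                 # survived every day: one valid string
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

assert prize_strings(4, 0, 0) == 43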
def A ( _UpperCAmelCase : list[int] , _UpperCAmelCase : str ) -> list[int]: '''simple docstring''' _UpperCAmelCase = int(_UpperCAmelCase ) # Initialize Result _UpperCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase__ = [] UpperCAmelCase__ = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCAmelCase__ = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(f"""Denomination {i}: """).strip())) UpperCAmelCase__ = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] UpperCAmelCase__ = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(f"""Following is minimal change for {value}: """) UpperCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
339
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
1
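The change-making row above is a greedy scan over denominations from largest to smallest. That strategy is only optimal for canonical coin systems such as the Indian denominations used in the source; for a system like [1, 3, 4] it can return 6 as [4, 1, 1] instead of [3, 3]. A de-obfuscated sketch (find_minimum_change is the name the source's own driver block calls):

def find_minimum_change(denominations, value):
    # Greedy: always take the largest denomination that still fits.
    answer = []
    for coin in sorted(denominations, reverse=True):
        while value >= coin:
            value -= coin
            answer.append(coin)
    return answer

assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]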
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = ReformerTokenizer UpperCamelCase = ReformerTokenizerFast UpperCamelCase = True UpperCamelCase = False UpperCamelCase = True def _lowerCamelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" super().setUp() _UpperCAmelCase = ReformerTokenizer(A , keep_accents=A) tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : str) -> List[str]: """simple docstring""" _UpperCAmelCase = '<s>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A) , A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A) , A) def _lowerCamelCase ( self : Any) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(A) , 10_00) def _lowerCamelCase ( self : int) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00) def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(A) _UpperCAmelCase = rust_tokenizer.tokenize(A) self.assertListEqual(A , A) _UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A) _UpperCAmelCase = rust_tokenizer.encode(A , add_special_tokens=A) self.assertListEqual(A , A) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(A) _UpperCAmelCase = rust_tokenizer.encode(A) self.assertListEqual(A , A) def _lowerCamelCase ( self : Optional[Any] , A : Dict=15) -> Any: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A) # Simple input _UpperCAmelCase = 'This is a simple input' _UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase = ('This is a simple input', 'This is a pair') _UpperCAmelCase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length') # Simple input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length') # Simple input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , ) # Pair input self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length') # Pair input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length') # Pair input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , ) def _lowerCamelCase ( self : Union[str, Any]) -> str: """simple docstring""" pass def _lowerCamelCase ( self 
: Union[str, Any]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ReformerTokenizer(A , keep_accents=A) _UpperCAmelCase = tokenizer.tokenize('This is a test') self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(A) , [2_85, 46, 10, 1_70, 3_82] , ) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( A , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(A) self.assertListEqual( A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(A) self.assertListEqual( A , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def _lowerCamelCase ( self : Optional[int]) -> int: """simple docstring""" return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment') @slow def _lowerCamelCase ( self : List[str]) -> str: """simple docstring""" _UpperCAmelCase = 'Hello World!' _UpperCAmelCase = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(A , self.big_tokenizer.encode(A)) @slow def _lowerCamelCase ( self : int) -> Any: """simple docstring""" _UpperCAmelCase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) _UpperCAmelCase = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(A , self.big_tokenizer.encode(A)) @require_torch @slow def _lowerCamelCase ( self : int) -> Optional[int]: """simple docstring""" import torch from transformers import ReformerConfig, ReformerModel # Build sequence _UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys())[:10] _UpperCAmelCase = ' '.join(A) _UpperCAmelCase = self.big_tokenizer.encode_plus(A , return_tensors='pt') _UpperCAmelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt') _UpperCAmelCase = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) _UpperCAmelCase = encoded_sequence['input_ids'].shape _UpperCAmelCase = ReformerModel(A) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**A) model(**A) @slow def _lowerCamelCase ( self : List[str]) -> List[str]: """simple docstring""" _UpperCAmelCase = {'input_ids': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 _UpperCAmelCase = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=A , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=A , sequences=A , )
339
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
1
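The compute_metrics closure in the training-script row above reduces to an argmax-accuracy computation. Stripped of the EvalPrediction wrapper and column handling, the core is plain numpy (accuracy_from_logits is an illustrative name, not from the source):

import numpy as np

def accuracy_from_logits(logits: np.ndarray, labels: np.ndarray) -> float:
    preds = np.argmax(logits, axis=1)  # predicted class per example
    return (preds == labels).astype(np.float32).mean().item()

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
assert abs(accuracy_from_logits(logits, labels) - 2 / 3) < 1e-6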
import math def A ( _UpperCAmelCase : int ) -> list[int]: '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) # Size of every segment _UpperCAmelCase = [True] * (end + 1) _UpperCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(_UpperCAmelCase ) for i in range(start * start , end + 1 , _UpperCAmelCase ): _UpperCAmelCase = False start += 1 prime += in_prime _UpperCAmelCase = end + 1 _UpperCAmelCase = min(2 * end , _UpperCAmelCase ) while low <= n: _UpperCAmelCase = [True] * (high - low + 1) for each in in_prime: _UpperCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(_UpperCAmelCase , high + 1 , _UpperCAmelCase ): _UpperCAmelCase = False for j in range(len(_UpperCAmelCase ) ): if temp[j] is True: prime.append(j + low ) _UpperCAmelCase = high + 1 _UpperCAmelCase = min(high + end , _UpperCAmelCase ) return prime print(sieve(10**6))
339
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
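# A minimal sketch of the core pattern above, with the sandboxing
# (reliability_guard, I/O swallowing) omitted: run an untrusted snippet in
# a child process and kill it if it outlives the timeout. The helper names
# below are chosen for the example.
import multiprocessing


def _run_program(program: str, result: list) -> None:
    try:
        exec(program, {})
        result.append("passed")
    except BaseException as e:
        result.append(f"failed: {e}")


def check_with_timeout(program: str, timeout: float) -> str:
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=_run_program, args=(program, result))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    return result[0] if result else "timed out"


if __name__ == "__main__":
    print(check_with_timeout("assert 1 + 1 == 2", timeout=3.0))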
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ): UpperCamelCase = None UpperCamelCase = None class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ): UpperCamelCase = datasets.Audio() UpperCamelCase = '''audio''' UpperCamelCase = AudioFolderConfig UpperCamelCase = 42 # definition at the bottom of the script UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' ) UpperCAmelCase__ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCAmelCase__ = AUDIO_EXTENSIONS
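# A sketch of how this builder is typically reached: `datasets` resolves
# the packaged "audiofolder" loader to the class above. The directory
# layout below (class-named subfolders of audio files) is an assumed
# example, not something the module itself prescribes.
from datasets import load_dataset

# my_audio/
#     cat/1.wav  cat/2.wav ...
#     dog/1.wav  dog/2.wav ...
ds = load_dataset("audiofolder", data_dir="my_audio")
print(ds["train"].features)  # expect an Audio feature plus an inferred label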
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
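# Most of the skip decorators above share one pattern: wrap
# `unittest.skipUnless` around an availability check or an environment
# flag. A self-contained sketch of that pattern:
import os
import unittest


def parse_flag(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    return default if value is None else value.lower() in ("1", "true", "yes")


def slow_test(test_case):
    """Skip the decorated test unless RUN_SLOW is set in the environment."""
    return unittest.skipUnless(parse_flag("RUN_SLOW"), "test is slow")(test_case)


class ExampleTests(unittest.TestCase):
    @slow_test
    def test_expensive(self):
        self.assertTrue(True)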
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" super().tearDown() gc.collect() def _lowerCamelCase ( self : Union[str, Any]) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , ) _UpperCAmelCase = 'A painting of a squirrel eating a burger' _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = sd_pipe.prepare_inputs(A) _UpperCAmelCase = replicate(A) _UpperCAmelCase = shard(A) _UpperCAmelCase = jax.random.PRNGKey(0) _UpperCAmelCase = jax.random.split(A , jax.device_count()) _UpperCAmelCase = sd_pipe(A , A , A , num_inference_steps=25 , jit=A)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) _UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) _UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1] _UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten())) _UpperCAmelCase = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2]) print(F"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : str) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = 'stabilityai/stable-diffusion-2' _UpperCAmelCase , _UpperCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(A , subfolder='scheduler') _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( A , scheduler=A , revision='bf16' , dtype=jnp.bfloataa , ) _UpperCAmelCase = scheduler_params _UpperCAmelCase = 'A painting of a squirrel eating a burger' _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = sd_pipe.prepare_inputs(A) _UpperCAmelCase = replicate(A) _UpperCAmelCase = shard(A) _UpperCAmelCase = jax.random.PRNGKey(0) _UpperCAmelCase = jax.random.split(A , jax.device_count()) _UpperCAmelCase = sd_pipe(A , A , A , num_inference_steps=25 , jit=A)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) _UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) _UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1] _UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten())) _UpperCAmelCase = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7]) print(F"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1E-2
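# The tests above lean on jax's pmap-style data parallelism: `replicate`
# copies parameters to every device and `shard` splits the batch across
# them. A minimal sketch of that pattern with a toy computation instead of
# a diffusion pipeline:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((3,))}
inputs = jnp.arange(jax.device_count() * 3, dtype=jnp.float32).reshape(-1, 3)

p_params = replicate(params)  # add a leading device axis to every leaf
p_inputs = shard(inputs)      # (batch, 3) -> (devices, per_device, 3)


@jax.pmap
def apply(params, x):
    return x * params["w"]


print(apply(p_params, p_inputs).shape)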
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
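# A quick sanity check for the backtracking solver above: every row of a
# returned solution must be a permutation of 1..9. Solving a deep copy
# leaves the example grids untouched.
import copy

solved = sudoku(copy.deepcopy(initial_grid))
assert solved is not None
assert all(sorted(row) == list(range(1, 10)) for row in solved)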
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self : int , A : int , A : str=13 , A : int=7 , A : Union[str, Any]=True , A : Dict=True , A : Tuple=True , A : Tuple=True , A : Optional[int]=99 , A : List[Any]=32 , A : Any=5 , A : Tuple=4 , A : int=37 , A : int="gelu" , A : Union[str, Any]=0.1 , A : Optional[Any]=0.1 , A : Any=1_28 , A : Any=32 , A : List[str]=16 , A : str=2 , A : Tuple=0.0_2 , A : Optional[int]=3 , A : List[Any]=4 , A : List[str]=None , ) -> str: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def _lowerCamelCase ( self : Tuple) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self : str) -> Tuple: """simple docstring""" return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) 
def _lowerCamelCase ( self : Any) -> List[Any]: """simple docstring""" ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = self.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCamelCase ( self : List[Any] , A : Tuple , A : str , A : str , A : List[str] , A : str , A : Any , A : Optional[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = NezhaModel(config=A) model.to(A) model.eval() _UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A) _UpperCAmelCase = model(A , token_type_ids=A) _UpperCAmelCase = model(A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _lowerCamelCase ( self : int , A : List[Any] , A : Optional[int] , A : int , A : List[str] , A : Tuple , A : Union[str, Any] , A : List[str] , A : int , A : Optional[int] , ) -> Dict: """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = NezhaModel(A) model.to(A) model.eval() _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , encoder_hidden_states=A , encoder_attention_mask=A , ) _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , encoder_hidden_states=A , ) _UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Tuple , A : Optional[Any] , A : Union[str, Any] , A : str , A : Optional[Any] , A : int) -> Tuple: """simple docstring""" _UpperCAmelCase = NezhaForMaskedLM(config=A) model.to(A) model.eval() _UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : str , A : List[str] , A : str , A : int , A : Dict , A : int) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = NezhaForNextSentencePrediction(config=A) model.to(A) model.eval() _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def _lowerCamelCase ( self : List[Any] , A : List[str] , A : List[str] , A : Dict , A : List[str] , A : List[str] , A : Optional[int] , A : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = NezhaForPreTraining(config=A) model.to(A) model.eval() _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def _lowerCamelCase ( self : List[str] , A : Any , A : Tuple , A : Union[str, Any] , A : Tuple , A : List[str] , A : Union[str, Any] , A : List[str]) -> Optional[int]: """simple docstring""" _UpperCAmelCase = 
NezhaForQuestionAnswering(config=A) model.to(A) model.eval() _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : List[str] , A : Tuple , A : str , A : List[Any] , A : Tuple) -> int: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = NezhaForSequenceClassification(A) model.to(A) model.eval() _UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCamelCase ( self : Tuple , A : Any , A : List[Any] , A : Optional[int] , A : Optional[Any] , A : str , A : Union[str, Any] , A : Optional[int]) -> Tuple: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = NezhaForTokenClassification(config=A) model.to(A) model.eval() _UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowerCamelCase ( self : Union[str, Any] , A : List[str] , A : Union[str, Any] , A : List[str] , A : Tuple , A : Any , A : Tuple , A : List[Any]) -> Any: """simple docstring""" _UpperCAmelCase = self.num_choices _UpperCAmelCase = NezhaForMultipleChoice(config=A) model.to(A) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCAmelCase = model( A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _lowerCamelCase ( self : Dict) -> Dict: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( A , A , A , unittest.TestCase ): UpperCamelCase = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': NezhaModel, '''fill-mask''': NezhaForMaskedLM, '''question-answering''': NezhaForQuestionAnswering, '''text-classification''': NezhaForSequenceClassification, '''token-classification''': NezhaForTokenClassification, '''zero-shot''': NezhaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase = True def _lowerCamelCase ( self : Tuple , A : Optional[int] , A : List[Any] , A : List[str]=False) -> List[str]: """simple docstring""" _UpperCAmelCase = super()._prepare_for_class(A , A , return_labels=A) if return_labels: if model_class in get_values(A): _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A) _UpperCAmelCase = torch.zeros( 
self.model_tester.batch_size , dtype=torch.long , device=A) return inputs_dict def _lowerCamelCase ( self : List[Any]) -> Any: """simple docstring""" _UpperCAmelCase = NezhaModelTester(self) _UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37) def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def _lowerCamelCase ( self : Any) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A) def _lowerCamelCase ( self : int) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A) def _lowerCamelCase ( self : Dict) -> List[str]: """simple docstring""" ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() _UpperCAmelCase = None self.model_tester.create_and_check_model_as_decoder( A , A , A , A , A , A , A , A , A , ) def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A) def _lowerCamelCase ( self : int) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A) def _lowerCamelCase ( self : int) -> int: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*A) def _lowerCamelCase ( self : Dict) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A) def _lowerCamelCase ( self : str) -> str: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A) def _lowerCamelCase ( self : str) -> Dict: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A) @slow def _lowerCamelCase ( self : List[Any]) -> Any: """simple docstring""" for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = NezhaModel.from_pretrained(A) self.assertIsNotNone(A) @slow @require_torch_gpu def _lowerCamelCase ( self : int) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return _UpperCAmelCase = True _UpperCAmelCase = model_class(config=A) _UpperCAmelCase = self._prepare_for_class(A , A) _UpperCAmelCase = torch.jit.trace( A , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A , os.path.join(A , 'bert.pt')) _UpperCAmelCase = torch.jit.load(os.path.join(A , 'bert.pt') , map_location=A) loaded(inputs_dict['input_ids'].to(A) , inputs_dict['attention_mask'].to(A)) @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Any) -> Tuple: """simple docstring""" _UpperCAmelCase = NezhaModel.from_pretrained('sijunhe/nezha-cn-base') _UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1]]) with torch.no_grad(): _UpperCAmelCase = model(A , attention_mask=A)[0] _UpperCAmelCase = torch.Size((1, 6, 7_68)) self.assertEqual(output.shape , A) _UpperCAmelCase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4)) @slow def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" _UpperCAmelCase = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base') _UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _UpperCAmelCase = model(A , attention_mask=A)[0] _UpperCAmelCase = torch.Size((1, 6, 2_11_28)) self.assertEqual(output.shape , A) _UpperCAmelCase = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4))
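# The torchscript test above traces a model with example inputs, saves it,
# reloads it, and calls the loaded artifact. A sketch of the same
# round-trip with a toy module instead of Nezha:
import os
import tempfile

import torch


class ToyModule(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2


traced = torch.jit.trace(ToyModule(), (torch.ones(2, 3),))
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(traced, os.path.join(tmp, "toy.pt"))
    loaded = torch.jit.load(os.path.join(tmp, "toy.pt"), map_location="cpu")
print(loaded(torch.ones(2, 3)))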
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
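# A sketch of what the metric wrapper above ultimately calls on recent
# nltk versions: `single_meteor_score` over tokenized reference/prediction
# pairs, with the same alpha/beta/gamma defaults.
import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download("wordnet")
nltk.download("punkt")

reference = "It is a guide to action that ensures that the military will forever heed Party commands"
prediction = "It is a guide to action which ensures that the military always obeys the commands of the party"

score = meteor_score.single_meteor_score(
    word_tokenize(reference), word_tokenize(prediction), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))  # ~0.6944, matching the docstring example above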
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 16 UpperCAmelCase__ = 32 def A ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int = 16 ) -> List[str]: '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) _UpperCAmelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_UpperCAmelCase : Any ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , ) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) _UpperCAmelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def A ( _UpperCAmelCase : int , _UpperCAmelCase : str ) -> int: '''simple docstring''' # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1": _UpperCAmelCase = 2 # New Code # _UpperCAmelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['lr'] _UpperCAmelCase = int(config['num_epochs'] ) _UpperCAmelCase = int(config['seed'] ) _UpperCAmelCase = int(config['batch_size'] ) _UpperCAmelCase = evaluate.load('glue' , 'mrpc' ) set_seed(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_UpperCAmelCase ): _UpperCAmelCase = model(**_UpperCAmelCase ) _UpperCAmelCase = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCAmelCase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , _UpperCAmelCase ) def A ( ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=_UpperCAmelCase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
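# Conceptually, `accelerator.accumulate` scales each micro-batch loss and
# defers `optimizer.step()` until N micro-batches have contributed
# gradients. A framework-free sketch of that loop with toy data:
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulation_steps = 4

for step in range(16):
    x = torch.randn(8, 4)
    y = torch.randint(0, 2, (8,))
    loss = torch.nn.functional.cross_entropy(model(x), y)
    (loss / accumulation_steps).backward()  # gradients accumulate in .grad
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()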
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
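# The download helper above boils down to: stream the checkpoint to disk,
# then verify its SHA-256 (which OpenAI embeds in the URL path) before
# trusting the bytes. A stripped-down sketch of that pattern:
import hashlib
import urllib.request


def download_verified(url: str, expected_sha256: str, target: str) -> bytes:
    with urllib.request.urlopen(url) as source, open(target, "wb") as output:
        while True:
            buffer = source.read(8_192)
            if not buffer:
                break
            output.write(buffer)
    model_bytes = open(target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError("SHA256 checksum mismatch; please retry the download.")
    return model_bytes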
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> Optional[Any]: '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match" _UpperCAmelCase = nn.Parameter(_UpperCAmelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match" _UpperCAmelCase = nn.Parameter(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> List[Any]: '''simple docstring''' # set torch weights for 1-to-1 comparison _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' # set torch weights for 1-to-1 comparison _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) _UpperCAmelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' # layernorm 1 _UpperCAmelCase = weights[0][0][0] _UpperCAmelCase = np.asarray(layer_norm_a[0] ) _UpperCAmelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # lsh weights + output _UpperCAmelCase = weights[0][1] if len(_UpperCAmelCase ) < 4: set_layer_weights_in_torch_lsh(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) else: set_layer_weights_in_torch_local(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) # intermediate weighs _UpperCAmelCase = weights[2][0][1][2] # Chunked Feed Forward if len(_UpperCAmelCase ) == 4: _UpperCAmelCase = intermediate_weights[2] # layernorm 2 _UpperCAmelCase = np.asarray(intermediate_weights[0][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # intermediate dense _UpperCAmelCase = np.asarray(intermediate_weights[1][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[1][1] ) set_param( 
torch_block.feed_forward.dense.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) # intermediate out _UpperCAmelCase = np.asarray(intermediate_weights[4][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' # reformer model _UpperCAmelCase = torch_model.reformer # word embeds _UpperCAmelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(_UpperCAmelCase ) , ) if isinstance(weights[3] , _UpperCAmelCase ): _UpperCAmelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _UpperCAmelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"{position_embeddings[emb_idx]} emb does not match" _UpperCAmelCase = nn.Parameter(torch.tensor(_UpperCAmelCase ) ) _UpperCAmelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( _UpperCAmelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _UpperCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # output layer norm _UpperCAmelCase = np.asarray(weights[7][0] ) _UpperCAmelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # output embeddings _UpperCAmelCase = np.asarray(weights[9][0] ) _UpperCAmelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Optional[int]: '''simple docstring''' # Initialise PyTorch model _UpperCAmelCase = ReformerConfig.from_json_file(_UpperCAmelCase ) print(F"Building PyTorch model from configuration: {config}" ) _UpperCAmelCase = ReformerModelWithLMHead(_UpperCAmelCase ) with open(_UpperCAmelCase , 'rb' ) as f: _UpperCAmelCase = pickle.load(_UpperCAmelCase )['weights'] set_model_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , config.hidden_size ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase__ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
339
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ): UpperCamelCase = None UpperCamelCase = None class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ): UpperCamelCase = datasets.Audio() UpperCamelCase = '''audio''' UpperCamelCase = AudioFolderConfig UpperCamelCase = 42 # definition at the bottom of the script UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' ) UpperCAmelCase__ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCAmelCase__ = AUDIO_EXTENSIONS
339
1
import argparse import os import re UpperCAmelCase__ = "src/diffusers" # Pattern that looks at the indentation in a line. UpperCAmelCase__ = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. UpperCAmelCase__ = re.compile(r"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCAmelCase__ = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. UpperCAmelCase__ = re.compile(r"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCAmelCase__ = re.compile(r"\[([^\]]+)\]") def A ( _UpperCAmelCase : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCAmelCase = _re_indent.search(_UpperCAmelCase ) return "" if search is None else search.groups()[0] def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str="" , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Union[str, Any]=None ) -> str: '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_UpperCAmelCase ): index += 1 _UpperCAmelCase = ['\n'.join(lines[:index] )] else: _UpperCAmelCase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _UpperCAmelCase = [lines[index]] index += 1 while index < len(_UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(_UpperCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_UpperCAmelCase ) ) if index < len(_UpperCAmelCase ) - 1: _UpperCAmelCase = [lines[index + 1]] index += 1 else: _UpperCAmelCase = [] else: blocks.append('\n'.join(_UpperCAmelCase ) ) _UpperCAmelCase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_UpperCAmelCase ) > 0: blocks.append('\n'.join(_UpperCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_UpperCAmelCase ): blocks.append('\n'.join(lines[index:] ) ) return blocks def A ( _UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' def _inner(_UpperCAmelCase : Optional[int] ): return key(_UpperCAmelCase ).lower().replace('_' , '' ) return _inner def A ( _UpperCAmelCase : str , _UpperCAmelCase : int=None ) -> Optional[Any]: '''simple docstring''' # If no key is provided, we use a noop. def noop(_UpperCAmelCase : Optional[int] ): return x if key is None: _UpperCAmelCase = noop # Constants are all uppercase, they go first. _UpperCAmelCase = [obj for obj in objects if key(_UpperCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _UpperCAmelCase = [obj for obj in objects if key(_UpperCAmelCase )[0].isupper() and not key(_UpperCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. _UpperCAmelCase = [obj for obj in objects if not key(_UpperCAmelCase )[0].isupper()] _UpperCAmelCase = ignore_underscore(_UpperCAmelCase ) return sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]: '''simple docstring''' # This inner function sorts imports between [ ].
def _replace(_UpperCAmelCase : Union[str, Any] ): _UpperCAmelCase = match.groups()[0] if "," not in imports: return F"[{imports}]" _UpperCAmelCase = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _UpperCAmelCase = keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_UpperCAmelCase )] ) + "]" _UpperCAmelCase = import_statement.split('\n' ) if len(_UpperCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _UpperCAmelCase = 2 if lines[1].strip() == '[' else 1 _UpperCAmelCase = [(i, _re_strip_line.search(_UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _UpperCAmelCase = sort_objects(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] ) _UpperCAmelCase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_UpperCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _UpperCAmelCase = _re_bracket_content.sub(_replace , lines[1] ) else: _UpperCAmelCase = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _UpperCAmelCase = keys[:-1] _UpperCAmelCase = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_UpperCAmelCase )] ) return "\n".join(_UpperCAmelCase ) else: # Finally we have to deal with imports fitting on one line _UpperCAmelCase = _re_bracket_content.sub(_replace , _UpperCAmelCase ) return import_statement def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=True ) -> Optional[int]: '''simple docstring''' with open(_UpperCAmelCase , 'r' ) as f: _UpperCAmelCase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _UpperCAmelCase = split_code_in_indented_blocks( _UpperCAmelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_UpperCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _UpperCAmelCase = main_blocks[block_idx] _UpperCAmelCase = block.split('\n' ) # Get to the start of the imports. _UpperCAmelCase = 0 while line_idx < len(_UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _UpperCAmelCase = len(_UpperCAmelCase ) else: line_idx += 1 if line_idx >= len(_UpperCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. _UpperCAmelCase = '\n'.join(block_lines[line_idx:-1] ) _UpperCAmelCase = get_indent(block_lines[1] ) # Split the internal block into blocks of indent level 1. _UpperCAmelCase = split_code_in_indented_blocks(_UpperCAmelCase , indent_level=_UpperCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend _UpperCAmelCase = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments.
_UpperCAmelCase = [(pattern.search(_UpperCAmelCase ).groups()[0] if pattern.search(_UpperCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _UpperCAmelCase = [(i, key) for i, key in enumerate(_UpperCAmelCase ) if key is not None] _UpperCAmelCase = [x[0] for x in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. _UpperCAmelCase = 0 _UpperCAmelCase = [] for i in range(len(_UpperCAmelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _UpperCAmelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_UpperCAmelCase ) count += 1 # And we put our main block back together with its first and last line. _UpperCAmelCase = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_UpperCAmelCase ): if check_only: return True else: print(F"Overwriting {file}." ) with open(_UpperCAmelCase , 'w' ) as f: f.write('\n'.join(_UpperCAmelCase ) ) def A ( _UpperCAmelCase : List[Any]=True ) -> Tuple: '''simple docstring''' _UpperCAmelCase = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: _UpperCAmelCase = sort_imports(os.path.join(_UpperCAmelCase , '__init__.py' ) , check_only=_UpperCAmelCase ) if result: _UpperCAmelCase = [os.path.join(_UpperCAmelCase , '__init__.py' )] if len(_UpperCAmelCase ) > 0: raise ValueError(F"Would overwrite {len(_UpperCAmelCase )} files, run `make style`." ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") UpperCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
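A minimal illustration (not part of the original script) of the ordering `sort_objects` is meant to implement; the names below are hypothetical, and the sketch follows the pre-obfuscation helper name, since the rendition above renames some locals. Constants sort first, then classes, then functions, each alphabetically with underscores ignored:
sort_objects(["my_func", "MyClass", "MY_CONST", "OtherClass"])
# expected: ["MY_CONST", "MyClass", "OtherClass", "my_func"]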
339
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
339
1
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = SpeechTaTokenizer UpperCamelCase = False UpperCamelCase = True def _lowerCamelCase ( self : Optional[int]) -> List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = SpeechTaTokenizer(A) _UpperCAmelCase = AddedToken('<mask>' , lstrip=A , rstrip=A) _UpperCAmelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token}) tokenizer.add_tokens(['<ctc_blank>']) tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Any , A : List[Any]) -> str: """simple docstring""" _UpperCAmelCase = 'this is a test' _UpperCAmelCase = 'this is a test' return input_text, output_text def _lowerCamelCase ( self : int , A : Dict , A : Any=False , A : Optional[Any]=20 , A : List[str]=5) -> List[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A) _UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A) _UpperCAmelCase = tokenizer.decode(A , clean_up_tokenization_spaces=A) return text, ids def _lowerCamelCase ( self : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A) , A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A) , A) def _lowerCamelCase ( self : Optional[int]) -> List[Any]: """simple docstring""" _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-4] , 'œ') self.assertEqual(vocab_keys[-2] , '<mask>') self.assertEqual(vocab_keys[-1] , '<ctc_blank>') self.assertEqual(len(A) , 81) def _lowerCamelCase ( self : List[str]) -> int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79) def _lowerCamelCase ( self : int) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.get_tokenizers(do_lower_case=A) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}"): _UpperCAmelCase = tokenizer.vocab_size _UpperCAmelCase = len(A) self.assertNotEqual(A , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd'] _UpperCAmelCase = tokenizer.add_tokens(A) _UpperCAmelCase = tokenizer.vocab_size _UpperCAmelCase = len(A) self.assertNotEqual(A , 0) self.assertEqual(A , A) self.assertEqual(A , len(A)) self.assertEqual(A , all_size + len(A)) _UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A) self.assertGreaterEqual(len(A) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) _UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} _UpperCAmelCase = tokenizer.add_special_tokens(A) _UpperCAmelCase = tokenizer.vocab_size _UpperCAmelCase = len(A) self.assertNotEqual(A , 0) 
self.assertEqual(A , A) self.assertEqual(A , len(A)) self.assertEqual(A , all_size_a + len(A)) _UpperCAmelCase = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A) self.assertGreaterEqual(len(A) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) def _lowerCamelCase ( self : Tuple) -> Any: """simple docstring""" pass def _lowerCamelCase ( self : Tuple) -> List[Any]: """simple docstring""" pass def _lowerCamelCase ( self : List[str]) -> str: """simple docstring""" _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = tokenizer.tokenize('This is a test') # fmt: off self.assertListEqual(A , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(A) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(A) # fmt: off self.assertListEqual(A , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: on _UpperCAmelCase = tokenizer.convert_ids_to_tokens(A) self.assertListEqual( A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) @slow def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off _UpperCAmelCase = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=A , )
339
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=5 ) -> List[Any]: '''simple docstring''' # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 _UpperCAmelCase = torch.tensor(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ).unsqueeze(0 ) # Batch size 1 _UpperCAmelCase = model(_UpperCAmelCase )[0] # The last hidden-state is the first element of the output tuple _UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _UpperCAmelCase = logits[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=0 ) _UpperCAmelCase , _UpperCAmelCase = prob.topk(k=_UpperCAmelCase , dim=0 ) _UpperCAmelCase = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_UpperCAmelCase ) )] ) _UpperCAmelCase = tokenizer.mask_token _UpperCAmelCase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): _UpperCAmelCase = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(_UpperCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(_UpperCAmelCase ) , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(_UpperCAmelCase , _UpperCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCAmelCase__ = CamembertTokenizer.from_pretrained("camembert-base") UpperCAmelCase__ = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() UpperCAmelCase__ = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
339
1
# flake8: noqa # Lint as: python3 UpperCAmelCase__ = [ "VerificationMode", "Version", "disable_progress_bar", "enable_progress_bar", "is_progress_bar_enabled", "experimental", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
339
import math import unittest def A ( _UpperCAmelCase : int ) -> bool: '''simple docstring''' assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(11)) self.assertTrue(is_prime(13)) self.assertTrue(is_prime(17)) self.assertTrue(is_prime(19)) self.assertTrue(is_prime(23)) self.assertTrue(is_prime(29)) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" with self.assertRaises(A): is_prime(-19) self.assertFalse( is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , ) self.assertFalse( is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , ) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
339
1
from __future__ import annotations def A ( _UpperCAmelCase : list ) -> float: '''simple docstring''' if not nums: raise ValueError('List is empty' ) return sum(_UpperCAmelCase ) / len(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
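A short usage sketch (not part of the original snippet); `A` is the obfuscated name of the arithmetic-mean function defined above:
print(A([3.0, 6.0, 9.0]))  # 6.0
print(A([1, 2, 3, 4]))     # 2.5
# A([]) raises ValueError('List is empty')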
339
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
339
1
def A ( _UpperCAmelCase : list[int] ) -> int: '''simple docstring''' if not numbers: return 0 if not isinstance(_UpperCAmelCase , (list, tuple) ) or not all( isinstance(_UpperCAmelCase , _UpperCAmelCase ) for number in numbers ): raise ValueError('numbers must be an iterable of integers' ) _UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = numbers[0] for i in range(1 , len(_UpperCAmelCase ) ): # update the maximum and minimum subarray products _UpperCAmelCase = numbers[i] if number < 0: _UpperCAmelCase , _UpperCAmelCase = min_till_now, max_till_now _UpperCAmelCase = max(_UpperCAmelCase , max_till_now * number ) _UpperCAmelCase = min(_UpperCAmelCase , min_till_now * number ) # update the maximum product found till now _UpperCAmelCase = max(_UpperCAmelCase , _UpperCAmelCase ) return max_prod
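A brief demonstration (not in the original snippet) of the maximum-product-subarray function above, here still under its obfuscated name `A`:
print(A([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
print(A([-2, 0, -1]))    # 0
print(A([-4, -3, -2]))   # 12, from the subarray [-4, -3]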
339
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
339
1
import cmath import math def A ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> complex: '''simple docstring''' _UpperCAmelCase = math.radians(_UpperCAmelCase ) _UpperCAmelCase = math.radians(_UpperCAmelCase ) # Convert voltage and current to rectangular form _UpperCAmelCase = cmath.rect(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = cmath.rect(_UpperCAmelCase , _UpperCAmelCase ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
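A quick usage sketch (not part of the original file) of the apparent-power function above; `A` is its obfuscated name, and the magnitudes and angles are arbitrary illustrative values:
print(A(100, 5, 0, 0))   # (500+0j): voltage and current in phase, purely real
print(A(100, 5, 90, 0))  # approximately 500j: a 90-degree phase angle, purely reactive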
339
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''open-llama''' def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( 'use_memorry_efficient_attention' , A) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ' F"got {self.rope_scaling}") _UpperCAmelCase = self.rope_scaling.get('type' , A) _UpperCAmelCase = self.rope_scaling.get('factor' , A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
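A hedged sketch of the `rope_scaling` contract the validator above enforces, written against the pre-obfuscation class name `OpenLlamaConfig` (this file renames the class) and keyword arguments the obfuscated signature no longer exposes:
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# OpenLlamaConfig(rope_scaling={"type": "linear"})                  # raises: dict must have exactly two fields
# OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})   # raises: factor must be a float > 1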
339
1
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase__ = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' config.addinivalue_line( 'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' ) def A ( _UpperCAmelCase : Tuple ) -> List[str]: '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> Any: '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main _UpperCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase ) def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Tuple: '''simple docstring''' # If no tests are collected, pytest exits with code 5, which makes the CI fail. if exitstatus == 5: _UpperCAmelCase = 0 # Doctest custom flag to ignore output. UpperCAmelCase__ = doctest.register_optionflag("IGNORE_RESULT") UpperCAmelCase__ = doctest.OutputChecker class __lowerCAmelCase ( A ): def _lowerCamelCase ( self : Dict , A : Dict , A : Any , A : Optional[Any]) -> List[Any]: """simple docstring""" if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , A , A , A) UpperCAmelCase__ = CustomOutputChecker UpperCAmelCase__ = HfDoctestModule UpperCAmelCase__ = HfDocTestParser
339
def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = credit_card_number _UpperCAmelCase = 0 _UpperCAmelCase = len(_UpperCAmelCase ) - 2 for i in range(_UpperCAmelCase , -1 , -2 ): # double the value of every second digit _UpperCAmelCase = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two-digit number, # i.e. greater than 9 (e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single-digit number. if digit > 9: digit %= 10 digit += 1 _UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def A ( _UpperCAmelCase : str ) -> bool: '''simple docstring''' _UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(F"{error_message} it has nonnumerical characters." ) return False if not 13 <= len(_UpperCAmelCase ) <= 16: print(F"{error_message} of its length." ) return False if not validate_initial_digits(_UpperCAmelCase ): print(F"{error_message} of its first two digits." ) return False if not luhn_validation(_UpperCAmelCase ): print(F"{error_message} it fails the Luhn check." ) return False print(F"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("4111111111111111") validate_credit_card_number("32323")
339
1
def A ( _UpperCAmelCase : list[int] ) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError('List is empty' ) _UpperCAmelCase = sum(_UpperCAmelCase ) / len(_UpperCAmelCase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
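A short demonstration (not in the original snippet) of the mean-absolute-deviation function above, under its obfuscated name `A`:
print(A([1, 2, 3, 4]))                  # 1.0  (mean 2.5; deviations 1.5, 0.5, 0.5, 1.5)
print(A([2, 70, 6, 50, 20, 8, 4, 0]))   # 20.0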
339
from functools import reduce UpperCAmelCase__ = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def A ( _UpperCAmelCase : str = N ) -> int: '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(_UpperCAmelCase ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
339
1
from ...configuration_utils import PretrainedConfig UpperCAmelCase__ = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class __lowerCAmelCase ( A ): UpperCamelCase = '''tapas''' def __init__( self : Optional[int] , A : Any=3_05_22 , A : Dict=7_68 , A : Union[str, Any]=12 , A : Any=12 , A : Any=30_72 , A : Optional[Any]="gelu" , A : Any=0.1 , A : Tuple=0.1 , A : List[str]=10_24 , A : Union[str, Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , A : Optional[int]=0.0_2 , A : int=1E-12 , A : int=0 , A : Optional[Any]=1_0.0 , A : Optional[Any]=0 , A : Dict=1.0 , A : Union[str, Any]=None , A : Optional[Any]=1.0 , A : int=False , A : List[str]=None , A : List[Any]=1.0 , A : List[str]=1.0 , A : List[Any]=False , A : Union[str, Any]=False , A : str="ratio" , A : Any=None , A : Optional[int]=None , A : Union[str, Any]=64 , A : int=32 , A : Dict=False , A : List[str]=True , A : Tuple=False , A : Optional[int]=False , A : Union[str, Any]=True , A : Tuple=False , A : Dict=None , A : List[str]=None , **A : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=A , **A) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_sizes _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps # Fine-tuning task hyperparameters _UpperCAmelCase = positive_label_weight _UpperCAmelCase = num_aggregation_labels _UpperCAmelCase = aggregation_loss_weight _UpperCAmelCase = use_answer_as_supervision _UpperCAmelCase = answer_loss_importance _UpperCAmelCase = use_normalized_answer_loss _UpperCAmelCase = huber_loss_delta _UpperCAmelCase = temperature _UpperCAmelCase = aggregation_temperature _UpperCAmelCase = use_gumbel_for_cells _UpperCAmelCase = use_gumbel_for_aggregation _UpperCAmelCase = average_approximation_function _UpperCAmelCase = cell_selection_preference _UpperCAmelCase = answer_loss_cutoff _UpperCAmelCase = max_num_rows _UpperCAmelCase = max_num_columns _UpperCAmelCase = average_logits_per_cell _UpperCAmelCase = select_one_column _UpperCAmelCase = allow_empty_column_selection _UpperCAmelCase = init_cell_selection_weights_to_zero _UpperCAmelCase = reset_position_index_per_cell _UpperCAmelCase = disable_per_token_loss # Aggregation hyperparameters _UpperCAmelCase = aggregation_labels _UpperCAmelCase = no_aggregation_label_index if isinstance(self.aggregation_labels , A): _UpperCAmelCase = {int(A): v for k, v in aggregation_labels.items()}
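A hedged instantiation sketch (not part of the original file) showing the aggregation-label normalization at the end of `__init__`; it assumes the pre-obfuscation class name `TapasConfig` and the dict-key-to-int coercion the original code performs:
config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
)
print(config.aggregation_labels)  # string keys coerced to int: {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}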
339
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial through (1, y[0]), (2, y[1]), ..."""
    size = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) given in Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials OP(1..order, n)."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
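# Small worked example (my addition): interpolate() fits the unique lowest-degree
# polynomial through the first k terms of a sequence. For u(n) = n**3 the first
# three terms 1, 8, 27 give the quadratic 6n**2 - 11n + 6, whose first incorrect
# term (the "FIT" from the problem statement) is at n = 4 with value 58.
cubes = [n**3 for n in range(1, 11)]
fit_poly = interpolate(cubes[:3])  # degree-2 fit through (1, 1), (2, 8), (3, 27)
assert fit_poly(3) == 27  # still agrees on a fitted point
assert fit_poly(4) == 58  # 6*16 - 11*4 + 6 = 58, the first wrong term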
339
1
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`, merging both into one encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
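# Hedged usage sketch (my addition, not part of the file): driving the processor
# once it is instantiated from a pretrained checkpoint. The checkpoint id and the
# PIL image are placeholders for whatever the caller actually has on hand.
#
# from transformers import GitProcessor
# processor = GitProcessor.from_pretrained("microsoft/git-base")
# inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
# sorted(inputs.keys())  # ["attention_mask", "input_ids", "pixel_values"]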
339
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of nums occurs exactly once, else False.

    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 2, 4])
    False
    """
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
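# Worked example (my addition): duplicates collapse when the list is turned into a
# set, so comparing lengths decides uniqueness in O(n) expected time.
assert all_unique([1, 2, 3, 4]) is True
assert all_unique([1, 2, 2, 4]) is False  # the repeated 2 shrinks the set to size 3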
339
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
339
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Write num as a Roman numeral in minimal (subtractive) form."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total characters saved by rewriting each numeral in the file minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
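# Round-trip example (my addition, taken from the problem statement): sixteen I's
# parse to 16, which re-encodes minimally as "XVI" -- a saving of 13 characters,
# which is exactly what solution() accumulates per input line.
assert parse_roman_numerals("IIIIIIIIIIIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"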
339
1
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


open = open  # noqa: we just need to have a builtin inside this module to test it properly
339
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation text ("Cited by N") shown by Google Scholar for a query."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
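# Offline illustration (my addition): the scraper relies on Google Scholar's result
# markup, where the third anchor inside the "gs_fl" footer is the "Cited by N"
# link. A canned snippet shows which element anchors[2] selects.
from bs4 import BeautifulSoup

_html = (
    '<div class="gs_ri"><div class="gs_fl">'
    "<a>Save</a><a>Cite</a><a>Cited by 123</a>"
    "</div></div>"
)
_div = BeautifulSoup(_html, "html.parser").find("div", attrs={"class": "gs_ri"})
assert _div.find("div", attrs={"class": "gs_fl"}).find_all("a")[2].get_text() == "Cited by 123"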
339
1